430 files changed, 7855 insertions, 3980 deletions
diff --git a/.gitignore b/.gitignore index 3cc35ff7cd..1610bd40cd 100644 --- a/.gitignore +++ b/.gitignore @@ -96,7 +96,7 @@ DerivedData Pods/ # Artifacts directory -artifacts/ +/artifacts/ # Git generated files for conflicting *.orig diff --git a/.gitmodules b/.gitmodules index c32881cb95..04d155cfb4 100644 --- a/.gitmodules +++ b/.gitmodules @@ -17,6 +17,6 @@ [submodule "third_party/thrift"] path = third_party/thrift url = https://github.com/apache/thrift.git -[submodule "third_party/google_benchmark"] - path = third_party/google_benchmark +[submodule "third_party/benchmark"] + path = third_party/benchmark url = https://github.com/google/benchmark @@ -1260,9 +1260,9 @@ pc_cxx: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc pc_cxx_unsecure: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc ifeq ($(EMBED_OPENSSL),true) -privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a +privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a 
$(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a $(LIBDIR)/$(CONFIG)/libbenchmark.a else -privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a +privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libbenchmark.a endif @@ -6998,43 +6998,43 @@ ifneq ($(NO_DEPS),true) endif -LIBGOOGLE_BENCHMARK_SRC = \ - third_party/google_benchmark/src/benchmark.cc \ - third_party/google_benchmark/src/benchmark_register.cc \ - third_party/google_benchmark/src/colorprint.cc \ - third_party/google_benchmark/src/commandlineflags.cc \ - third_party/google_benchmark/src/complexity.cc \ - third_party/google_benchmark/src/console_reporter.cc \ - third_party/google_benchmark/src/csv_reporter.cc \ - third_party/google_benchmark/src/json_reporter.cc \ - third_party/google_benchmark/src/reporter.cc \ - third_party/google_benchmark/src/sleep.cc \ - third_party/google_benchmark/src/string_util.cc \ - third_party/google_benchmark/src/sysinfo.cc \ - third_party/google_benchmark/src/timers.cc \ +LIBBENCHMARK_SRC = \ + third_party/benchmark/src/benchmark.cc \ + third_party/benchmark/src/benchmark_register.cc \ + third_party/benchmark/src/colorprint.cc \ + third_party/benchmark/src/commandlineflags.cc \ + third_party/benchmark/src/complexity.cc \ + third_party/benchmark/src/console_reporter.cc \ + third_party/benchmark/src/csv_reporter.cc \ + third_party/benchmark/src/json_reporter.cc \ + third_party/benchmark/src/reporter.cc \ + third_party/benchmark/src/sleep.cc \ + third_party/benchmark/src/string_util.cc \ + third_party/benchmark/src/sysinfo.cc \ + third_party/benchmark/src/timers.cc \ PUBLIC_HEADERS_CXX += \ -LIBGOOGLE_BENCHMARK_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, 
$(addsuffix .o, $(basename $(LIBGOOGLE_BENCHMARK_SRC)))) +LIBBENCHMARK_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBBENCHMARK_SRC)))) -$(LIBGOOGLE_BENCHMARK_OBJS): CPPFLAGS += -Ithird_party/google_benchmark/include -DHAVE_POSIX_REGEX +$(LIBBENCHMARK_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX ifeq ($(NO_PROTOBUF),true) # You can't build a C++ library if you don't have protobuf - a bit overreached, but still okay. -$(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a: protobuf_dep_error +$(LIBDIR)/$(CONFIG)/libbenchmark.a: protobuf_dep_error else -$(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBGOOGLE_BENCHMARK_OBJS) +$(LIBDIR)/$(CONFIG)/libbenchmark.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBBENCHMARK_OBJS) $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` - $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a - $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a $(LIBGOOGLE_BENCHMARK_OBJS) + $(Q) rm -f $(LIBDIR)/$(CONFIG)/libbenchmark.a + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBBENCHMARK_OBJS) ifeq ($(SYSTEM),Darwin) - $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a + $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libbenchmark.a endif @@ -7043,7 +7043,7 @@ endif endif ifneq ($(NO_DEPS),true) --include $(LIBGOOGLE_BENCHMARK_OBJS:.o=.dep) +-include $(LIBBENCHMARK_OBJS:.o=.dep) endif @@ -11736,16 +11736,16 @@ $(BINDIR)/$(CONFIG)/bm_fullstack: protobuf_dep_error else -$(BINDIR)/$(CONFIG)/bm_fullstack: $(PROTOBUF_DEP) $(BM_FULLSTACK_OBJS) $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(BINDIR)/$(CONFIG)/bm_fullstack: $(PROTOBUF_DEP) $(BM_FULLSTACK_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(BM_FULLSTACK_OBJS) $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_fullstack + $(Q) $(LDXX) $(LDFLAGS) $(BM_FULLSTACK_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_fullstack endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_fullstack.o: $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_fullstack.o: $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a 
$(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a deps_bm_fullstack: $(BM_FULLSTACK_OBJS:.o=.dep) @@ -13192,16 +13192,16 @@ $(BINDIR)/$(CONFIG)/noop-benchmark: protobuf_dep_error else -$(BINDIR)/$(CONFIG)/noop-benchmark: $(PROTOBUF_DEP) $(NOOP-BENCHMARK_OBJS) $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a +$(BINDIR)/$(CONFIG)/noop-benchmark: $(PROTOBUF_DEP) $(NOOP-BENCHMARK_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(NOOP-BENCHMARK_OBJS) $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/noop-benchmark + $(Q) $(LDXX) $(LDFLAGS) $(NOOP-BENCHMARK_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/noop-benchmark endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/noop-benchmark.o: $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a +$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/noop-benchmark.o: $(LIBDIR)/$(CONFIG)/libbenchmark.a deps_noop-benchmark: $(NOOP-BENCHMARK_OBJS:.o=.dep) diff --git a/build.yaml b/build.yaml index 3ee013a70c..330f5eddd0 100644 --- a/build.yaml +++ b/build.yaml @@ -2850,7 +2850,7 @@ targets: src: - test/cpp/microbenchmarks/bm_fullstack.cc deps: - - google_benchmark + - benchmark - grpc++_test_util - grpc_test_util - grpc++ @@ -3300,7 +3300,7 @@ targets: src: - test/cpp/microbenchmarks/noop-benchmark.cc deps: - - google_benchmark + - benchmark - name: proto_server_reflection_test gtest: true build: test @@ -3786,6 +3786,8 @@ configs: UBSAN_OPTIONS: halt_on_error=1:print_stacktrace=1 timeout_multiplier: 1.5 defaults: + benchmark: + CPPFLAGS: -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX boringssl: CFLAGS: -Wno-sign-conversion -Wno-conversion -Wno-unused-value -Wno-unknown-pragmas -Wno-implicit-function-declaration -Wno-unused-variable -Wno-sign-compare $(NO_W_EXTRA_SEMI) @@ -3794,8 +3796,6 @@ defaults: global: CPPFLAGS: -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter LDFLAGS: -g - google_benchmark: - CPPFLAGS: -Ithird_party/google_benchmark/include -DHAVE_POSIX_REGEX zlib: CFLAGS: -Wno-sign-conversion -Wno-conversion -Wno-unused-value -Wno-implicit-function-declaration $(W_NO_SHIFT_NEGATIVE_VALUE) -fvisibility=hidden diff --git a/doc/connectivity-semantics-and-api.md b/doc/connectivity-semantics-and-api.md index cc007eaae3..6d39619d65 100644 --- a/doc/connectivity-semantics-and-api.md +++ b/doc/connectivity-semantics-and-api.md @@ -16,7 +16,7 @@ reconnect, or in the case of HTTP/2 GO_AWAY, re-resolve the name and reconnect. To hide the details of all this activity from the user of the gRPC API (i.e., application code) while exposing meaningful information about the state of a -channel, we use a state machine with four states, defined below: +channel, we use a state machine with five states, defined below: CONNECTING: The channel is trying to establish a connection and is waiting to make progress on one of the steps involved in name resolution, TCP connection @@ -116,7 +116,7 @@ Channel State API All gRPC libraries will expose a channel-level API method to poll the current state of a channel. In C++, this method is called GetCurrentState and returns -an enum for one of the four legal states. +an enum for one of the five legal states. 
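For reference, a minimal sketch (not part of this diff) of how the five connectivity states described above surface in the Python API. It assumes the grpcio package, whose grpc.ChannelConnectivity enum and Channel.subscribe() method play the role of the C++ GetCurrentState and state-change notification APIs mentioned in the doc.

```
import grpc

def log_state(state):
    # state is one of the five grpc.ChannelConnectivity values:
    # IDLE, CONNECTING, READY, TRANSIENT_FAILURE, SHUTDOWN.
    print('channel state changed to:', state)

channel = grpc.insecure_channel('localhost:50051')
# Subscribe to state-change notifications; try_to_connect=True nudges the
# channel out of IDLE so the transitions become observable.
channel.subscribe(log_state, try_to_connect=True)
```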
All libraries should also expose an API that enables the application (user of the gRPC API) to be notified when the channel state changes. Since state diff --git a/doc/negative-http2-interop-test-descriptions.md b/doc/negative-http2-interop-test-descriptions.md new file mode 100644 index 0000000000..5ea3a96dff --- /dev/null +++ b/doc/negative-http2-interop-test-descriptions.md @@ -0,0 +1,194 @@ +Negative HTTP/2 Interop Test Case Descriptions +======================================= + +Client and server use +[test.proto](../src/proto/grpc/testing/test.proto). + +Server +------ +The code for the custom http2 server can be found +[here](https://github.com/grpc/grpc/tree/master/test/http2_test). +It is responsible for handling requests and sending responses, and also for +fulfilling the behavior of each particular test case. + +Server should accept these arguments: +* --port=PORT + * The port the server will run on. For example, "8080" +* --test_case=TESTCASE + * The name of the test case to execute. For example, "goaway" + +Client +------ + +Clients implement test cases that test certain functionality. Each client is +provided the test case it is expected to run as a command-line parameter. Names +should be lowercase and without spaces. + +Clients should accept these arguments: +* --server_host=HOSTNAME + * The server host to connect to. For example, "localhost" or "127.0.0.1" +* --server_port=PORT + * The server port to connect to. For example, "8080" +* --test_case=TESTCASE + * The name of the test case to execute. For example, "goaway" + +Note +----- + +Note that the server and client must be invoked with the same test case or else +the test will be meaningless. For convenience, we provide a shell script wrapper +that invokes both server and client at the same time, with the same test_case. +This is the preferred way to run these tests. + +## Test Cases + +### goaway + +This test verifies that the client correctly responds to a goaway sent by the +server. The client should handle the goaway by switching to a new stream without +the user application having to do a thing. + +Client Procedure: + 1. Client sends two UnaryCall requests with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* Call was successful. +* Response payload body is 314159 bytes in size. + +Server Procedure: + 1. Server sends a GOAWAY after receiving the first UnaryCall. + +Server asserts: +* The second UnaryCall has a different stream_id than the first one. + +### rst_after_header + +This test verifies that the client fails correctly when the server sends a +RST_STREAM immediately after sending headers to the client. + +Procedure: + 1. Client sends UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* Call was not successful. + +Server Procedure: + 1. Server sends a RST_STREAM with error code 0 after sending headers to the client. + +*At the moment the error code and message returned are not standardized throughout all +languages. Those checks will be added once all client languages behave the same way. [#9142](https://github.com/grpc/grpc/issues/9142) is in flight.* + +### rst_during_data + +This test verifies that the client fails "correctly" when the server sends a +RST_STREAM halfway through sending data to the client. + +Procedure: + 1. 
Client sends UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* Call was not successful. + +Server Procedure: + 1. Server sends a RST_STREAM with error code 0 after sending half of + the requested data to the client. + +### rst_after_data + +This test verifies that the client fails "correctly" when the server sends a +RST_STREAM after sending all of the data to the client. + +Procedure: + 1. Client sends UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* Call was not successful. + +Server Procedure: + 1. Server sends a RST_STREAM with error code 0 after sending all of the + data to the client. + +*Certain client languages allow the data to be accessed even though a RST_STREAM +was encountered. Once all client languages behave this way, checks will be added on +the incoming data.* + +### ping + +This test verifies that the client correctly acknowledges all pings it gets from the +server. + +Procedure: + 1. Client sends UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* call was successful. +* response payload body is 314159 bytes in size. + +Server Procedure: + 1. Server tracks the number of outstanding pings (i.e. +1 when it sends a ping, and -1 + when it receives an ack from the client). + 2. Server sends pings before and after sending headers, also before and after sending data. + +Server Asserts: +* Number of outstanding pings is 0 when the connection is lost. + +### max_streams + +This test verifies that the client observes the MAX_CONCURRENT_STREAMS limit set by the server. + +Client Procedure: + 1. Client sends initial UnaryCall to allow the server to update its MAX_CONCURRENT_STREAMS settings. + 2. Client concurrently sends 10 UnaryCalls. + +Client Asserts: +* All UnaryCalls were successful, and had the correct type and payload size. + +Server Procedure: + 1. Sets MAX_CONCURRENT_STREAMS to one after the connection is made. + +*The assertion that the MAX_CONCURRENT_STREAMS limit is upheld occurs in the http2 library we used.* diff --git a/examples/cpp/helloworld/greeter_client.cc b/examples/cpp/helloworld/greeter_client.cc index 12209f37df..61f3953056 100644 --- a/examples/cpp/helloworld/greeter_client.cc +++ b/examples/cpp/helloworld/greeter_client.cc @@ -51,7 +51,7 @@ class GreeterClient { GreeterClient(std::shared_ptr<Channel> channel) : stub_(Greeter::NewStub(channel)) {} - // Assambles the client's payload, sends it and presents the response back + // Assembles the client's payload, sends it and presents the response back // from the server. std::string SayHello(const std::string& user) { // Data we are sending to the server. 
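For clarity, a hedged Python sketch (not part of this diff) of the client procedure shared by the test cases in doc/negative-http2-interop-test-descriptions.md added above: a single UnaryCall carrying a 271828-byte zero payload and requesting a 314159-byte response. The module names test_pb2/test_pb2_grpc, the TestServiceStub, and the SimpleRequest/Payload messages are assumed here to be generated from the referenced test.proto; the generated code should be checked for the exact names.

```
import grpc
# Assumed names of modules generated from src/proto/grpc/testing/test.proto.
import test_pb2
import test_pb2_grpc

def run(server_host, server_port):
    channel = grpc.insecure_channel('%s:%s' % (server_host, server_port))
    stub = test_pb2_grpc.TestServiceStub(channel)
    request = test_pb2.SimpleRequest(
        response_size=314159,
        payload=test_pb2.Payload(body=b'\x00' * 271828))
    response = stub.UnaryCall(request)
    # Client assertion from the test descriptions: the response payload body
    # is 314159 bytes in size.
    assert len(response.payload.body) == 314159

if __name__ == '__main__':
    run('localhost', 8080)
```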
diff --git a/examples/python/helloworld/greeter_client.py b/examples/python/helloworld/greeter_client.py index 44d42c102b..281a68f3c3 100644 --- a/examples/python/helloworld/greeter_client.py +++ b/examples/python/helloworld/greeter_client.py @@ -34,11 +34,12 @@ from __future__ import print_function import grpc import helloworld_pb2 +import helloworld_pb2_grpc def run(): channel = grpc.insecure_channel('localhost:50051') - stub = helloworld_pb2.GreeterStub(channel) + stub = helloworld_pb2_grpc.GreeterStub(channel) response = stub.SayHello(helloworld_pb2.HelloRequest(name='you')) print("Greeter client received: " + response.message) diff --git a/examples/python/helloworld/greeter_server.py b/examples/python/helloworld/greeter_server.py index 37d8bd49cc..0afc21d243 100644 --- a/examples/python/helloworld/greeter_server.py +++ b/examples/python/helloworld/greeter_server.py @@ -35,11 +35,12 @@ import time import grpc import helloworld_pb2 +import helloworld_pb2_grpc _ONE_DAY_IN_SECONDS = 60 * 60 * 24 -class Greeter(helloworld_pb2.GreeterServicer): +class Greeter(helloworld_pb2_grpc.GreeterServicer): def SayHello(self, request, context): return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name) @@ -47,7 +48,7 @@ class Greeter(helloworld_pb2.GreeterServicer): def serve(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) - helloworld_pb2.add_GreeterServicer_to_server(Greeter(), server) + helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server) server.add_insecure_port('[::]:50051') server.start() try: diff --git a/examples/python/helloworld/helloworld_pb2.py b/examples/python/helloworld/helloworld_pb2.py index 3ce33fbf2b..6665b1f687 100644 --- a/examples/python/helloworld/helloworld_pb2.py +++ b/examples/python/helloworld/helloworld_pb2.py @@ -107,98 +107,123 @@ _sym_db.RegisterMessage(HelloReply) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW')) -import grpc -from grpc.beta import implementations as beta_implementations -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities +try: + # THESE ELEMENTS WILL BE DEPRECATED. + # Please use the generated *_pb2_grpc.py files instead. + import grpc + from grpc.framework.common import cardinality + from grpc.framework.interfaces.face import utilities as face_utilities + from grpc.beta import implementations as beta_implementations + from grpc.beta import interfaces as beta_interfaces + + + class GreeterStub(object): + """The greeting service definition. + """ + def __init__(self, channel): + """Constructor. -class GreeterStub(object): - """The greeting service definition. - """ + Args: + channel: A grpc.Channel. + """ + self.SayHello = channel.unary_unary( + '/helloworld.Greeter/SayHello', + request_serializer=HelloRequest.SerializeToString, + response_deserializer=HelloReply.FromString, + ) - def __init__(self, channel): - """Constructor. - Args: - channel: A grpc.Channel. + class GreeterServicer(object): + """The greeting service definition. """ - self.SayHello = channel.unary_unary( - '/helloworld.Greeter/SayHello', - request_serializer=HelloRequest.SerializeToString, - response_deserializer=HelloReply.FromString, - ) - - -class GreeterServicer(object): - """The greeting service definition. 
- """ - def SayHello(self, request, context): - """Sends a greeting + def SayHello(self, request, context): + """Sends a greeting + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + + def add_GreeterServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SayHello': grpc.unary_unary_rpc_method_handler( + servicer.SayHello, + request_deserializer=HelloRequest.FromString, + response_serializer=HelloReply.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'helloworld.Greeter', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + class BetaGreeterServicer(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """The greeting service definition. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_GreeterServicer_to_server(servicer, server): - rpc_method_handlers = { - 'SayHello': grpc.unary_unary_rpc_method_handler( - servicer.SayHello, - request_deserializer=HelloRequest.FromString, - response_serializer=HelloReply.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'helloworld.Greeter', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class BetaGreeterServicer(object): - """The greeting service definition. - """ - def SayHello(self, request, context): - """Sends a greeting - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def SayHello(self, request, context): + """Sends a greeting + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + class BetaGreeterStub(object): + """The Beta API is deprecated for 0.15.0 and later. -class BetaGreeterStub(object): - """The greeting service definition. - """ - def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Sends a greeting + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """The greeting service definition. 
""" - raise NotImplementedError() - SayHello.future = None - - -def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - request_deserializers = { - ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString, - } - response_serializers = { - ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString, - } - method_implementations = { - ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - -def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - request_serializers = { - ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString, - } - response_deserializers = { - ('helloworld.Greeter', 'SayHello'): HelloReply.FromString, - } - cardinalities = { - 'SayHello': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options) + def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Sends a greeting + """ + raise NotImplementedError() + SayHello.future = None + + + def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_deserializers = { + ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString, + } + response_serializers = { + ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString, + } + method_implementations = { + ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + + def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. 
This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_serializers = { + ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString, + } + response_deserializers = { + ('helloworld.Greeter', 'SayHello'): HelloReply.FromString, + } + cardinalities = { + 'SayHello': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options) +except ImportError: + pass # @@protoc_insertion_point(module_scope) diff --git a/examples/python/helloworld/helloworld_pb2_grpc.py b/examples/python/helloworld/helloworld_pb2_grpc.py new file mode 100644 index 0000000000..682dc36cd8 --- /dev/null +++ b/examples/python/helloworld/helloworld_pb2_grpc.py @@ -0,0 +1,47 @@ +import grpc +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +import helloworld_pb2 as helloworld__pb2 + + +class GreeterStub(object): + """The greeting service definition. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.SayHello = channel.unary_unary( + '/helloworld.Greeter/SayHello', + request_serializer=helloworld__pb2.HelloRequest.SerializeToString, + response_deserializer=helloworld__pb2.HelloReply.FromString, + ) + + +class GreeterServicer(object): + """The greeting service definition. + """ + + def SayHello(self, request, context): + """Sends a greeting + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_GreeterServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SayHello': grpc.unary_unary_rpc_method_handler( + servicer.SayHello, + request_deserializer=helloworld__pb2.HelloRequest.FromString, + response_serializer=helloworld__pb2.HelloReply.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'helloworld.Greeter', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/examples/python/multiplex/helloworld_pb2.py b/examples/python/multiplex/helloworld_pb2.py index 3ce33fbf2b..6665b1f687 100644 --- a/examples/python/multiplex/helloworld_pb2.py +++ b/examples/python/multiplex/helloworld_pb2.py @@ -107,98 +107,123 @@ _sym_db.RegisterMessage(HelloReply) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW')) -import grpc -from grpc.beta import implementations as beta_implementations -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities +try: + # THESE ELEMENTS WILL BE DEPRECATED. + # Please use the generated *_pb2_grpc.py files instead. + import grpc + from grpc.framework.common import cardinality + from grpc.framework.interfaces.face import utilities as face_utilities + from grpc.beta import implementations as beta_implementations + from grpc.beta import interfaces as beta_interfaces + + + class GreeterStub(object): + """The greeting service definition. 
+ """ + def __init__(self, channel): + """Constructor. -class GreeterStub(object): - """The greeting service definition. - """ + Args: + channel: A grpc.Channel. + """ + self.SayHello = channel.unary_unary( + '/helloworld.Greeter/SayHello', + request_serializer=HelloRequest.SerializeToString, + response_deserializer=HelloReply.FromString, + ) - def __init__(self, channel): - """Constructor. - Args: - channel: A grpc.Channel. + class GreeterServicer(object): + """The greeting service definition. """ - self.SayHello = channel.unary_unary( - '/helloworld.Greeter/SayHello', - request_serializer=HelloRequest.SerializeToString, - response_deserializer=HelloReply.FromString, - ) - - -class GreeterServicer(object): - """The greeting service definition. - """ - def SayHello(self, request, context): - """Sends a greeting + def SayHello(self, request, context): + """Sends a greeting + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + + def add_GreeterServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SayHello': grpc.unary_unary_rpc_method_handler( + servicer.SayHello, + request_deserializer=HelloRequest.FromString, + response_serializer=HelloReply.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'helloworld.Greeter', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + class BetaGreeterServicer(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """The greeting service definition. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_GreeterServicer_to_server(servicer, server): - rpc_method_handlers = { - 'SayHello': grpc.unary_unary_rpc_method_handler( - servicer.SayHello, - request_deserializer=HelloRequest.FromString, - response_serializer=HelloReply.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'helloworld.Greeter', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class BetaGreeterServicer(object): - """The greeting service definition. - """ - def SayHello(self, request, context): - """Sends a greeting - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def SayHello(self, request, context): + """Sends a greeting + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + class BetaGreeterStub(object): + """The Beta API is deprecated for 0.15.0 and later. -class BetaGreeterStub(object): - """The greeting service definition. - """ - def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Sends a greeting + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """The greeting service definition. 
""" - raise NotImplementedError() - SayHello.future = None - - -def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - request_deserializers = { - ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString, - } - response_serializers = { - ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString, - } - method_implementations = { - ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - -def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - request_serializers = { - ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString, - } - response_deserializers = { - ('helloworld.Greeter', 'SayHello'): HelloReply.FromString, - } - cardinalities = { - 'SayHello': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options) + def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Sends a greeting + """ + raise NotImplementedError() + SayHello.future = None + + + def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_deserializers = { + ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString, + } + response_serializers = { + ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString, + } + method_implementations = { + ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + + def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. 
This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_serializers = { + ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString, + } + response_deserializers = { + ('helloworld.Greeter', 'SayHello'): HelloReply.FromString, + } + cardinalities = { + 'SayHello': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options) +except ImportError: + pass # @@protoc_insertion_point(module_scope) diff --git a/examples/python/multiplex/helloworld_pb2_grpc.py b/examples/python/multiplex/helloworld_pb2_grpc.py new file mode 100644 index 0000000000..682dc36cd8 --- /dev/null +++ b/examples/python/multiplex/helloworld_pb2_grpc.py @@ -0,0 +1,47 @@ +import grpc +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +import helloworld_pb2 as helloworld__pb2 + + +class GreeterStub(object): + """The greeting service definition. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.SayHello = channel.unary_unary( + '/helloworld.Greeter/SayHello', + request_serializer=helloworld__pb2.HelloRequest.SerializeToString, + response_deserializer=helloworld__pb2.HelloReply.FromString, + ) + + +class GreeterServicer(object): + """The greeting service definition. + """ + + def SayHello(self, request, context): + """Sends a greeting + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_GreeterServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SayHello': grpc.unary_unary_rpc_method_handler( + servicer.SayHello, + request_deserializer=helloworld__pb2.HelloRequest.FromString, + response_serializer=helloworld__pb2.HelloReply.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'helloworld.Greeter', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/examples/python/multiplex/multiplex_client.py b/examples/python/multiplex/multiplex_client.py index 2e8162926b..b2d2021e02 100644 --- a/examples/python/multiplex/multiplex_client.py +++ b/examples/python/multiplex/multiplex_client.py @@ -37,7 +37,9 @@ import time import grpc import helloworld_pb2 +import helloworld_pb2_grpc import route_guide_pb2 +import route_guide_pb2_grpc import route_guide_resources @@ -120,8 +122,8 @@ def guide_route_chat(route_guide_stub): def run(): channel = grpc.insecure_channel('localhost:50051') - greeter_stub = helloworld_pb2.GreeterStub(channel) - route_guide_stub = route_guide_pb2.RouteGuideStub(channel) + greeter_stub = helloworld_pb2_grpc.GreeterStub(channel) + route_guide_stub = route_guide_pb2_grpc.RouteGuideStub(channel) greeter_response = greeter_stub.SayHello( helloworld_pb2.HelloRequest(name='you')) print("Greeter client received: " + greeter_response.message) diff --git a/examples/python/multiplex/multiplex_server.py b/examples/python/multiplex/multiplex_server.py index 32a4ee4a49..b8b32e7bf8 100644 --- a/examples/python/multiplex/multiplex_server.py +++ b/examples/python/multiplex/multiplex_server.py @@ -36,7 +36,9 @@ import math import 
grpc import helloworld_pb2 +import helloworld_pb2_grpc import route_guide_pb2 +import route_guide_pb2_grpc import route_guide_resources _ONE_DAY_IN_SECONDS = 60 * 60 * 24 @@ -70,13 +72,13 @@ def _get_distance(start, end): return R * c; -class _GreeterServicer(helloworld_pb2.GreeterServicer): +class _GreeterServicer(helloworld_pb2_grpc.GreeterServicer): def SayHello(self, request, context): return helloworld_pb2.HelloReply(message='Hello, {}!'.format(request.name)) -class _RouteGuideServicer(route_guide_pb2.RouteGuideServicer): +class _RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer): """Provides methods that implement functionality of route guide server.""" def __init__(self): @@ -133,8 +135,8 @@ class _RouteGuideServicer(route_guide_pb2.RouteGuideServicer): def serve(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) - helloworld_pb2.add_GreeterServicer_to_server(_GreeterServicer(), server) - route_guide_pb2.add_RouteGuideServicer_to_server( + helloworld_pb2_grpc.add_GreeterServicer_to_server(_GreeterServicer(), server) + route_guide_pb2_grpc.add_RouteGuideServicer_to_server( _RouteGuideServicer(), server) server.add_insecure_port('[::]:50051') server.start() diff --git a/examples/python/multiplex/route_guide_pb2.py b/examples/python/multiplex/route_guide_pb2.py index 924e186e06..e6775eb814 100644 --- a/examples/python/multiplex/route_guide_pb2.py +++ b/examples/python/multiplex/route_guide_pb2.py @@ -277,240 +277,265 @@ _sym_db.RegisterMessage(RouteSummary) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.routeguideB\017RouteGuideProtoP\001\242\002\003RTG')) -import grpc -from grpc.beta import implementations as beta_implementations -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - - -class RouteGuideStub(object): - """Interface exported by the server. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. +try: + # THESE ELEMENTS WILL BE DEPRECATED. + # Please use the generated *_pb2_grpc.py files instead. + import grpc + from grpc.framework.common import cardinality + from grpc.framework.interfaces.face import utilities as face_utilities + from grpc.beta import implementations as beta_implementations + from grpc.beta import interfaces as beta_interfaces + + + class RouteGuideStub(object): + """Interface exported by the server. """ - self.GetFeature = channel.unary_unary( - '/routeguide.RouteGuide/GetFeature', - request_serializer=Point.SerializeToString, - response_deserializer=Feature.FromString, - ) - self.ListFeatures = channel.unary_stream( - '/routeguide.RouteGuide/ListFeatures', - request_serializer=Rectangle.SerializeToString, - response_deserializer=Feature.FromString, - ) - self.RecordRoute = channel.stream_unary( - '/routeguide.RouteGuide/RecordRoute', - request_serializer=Point.SerializeToString, - response_deserializer=RouteSummary.FromString, - ) - self.RouteChat = channel.stream_stream( - '/routeguide.RouteGuide/RouteChat', - request_serializer=RouteNote.SerializeToString, - response_deserializer=RouteNote.FromString, - ) - - -class RouteGuideServicer(object): - """Interface exported by the server. - """ - - def GetFeature(self, request, context): - """A simple RPC. - - Obtains the feature at a given position. - - A feature with an empty name is returned if there's no feature at the given - position. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ListFeatures(self, request, context): - """A server-to-client streaming RPC. - Obtains the Features available within the given Rectangle. Results are - streamed rather than returned at once (e.g. in a response message with a - repeated field), as the rectangle may cover a large area and contain a - huge number of features. + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.GetFeature = channel.unary_unary( + '/routeguide.RouteGuide/GetFeature', + request_serializer=Point.SerializeToString, + response_deserializer=Feature.FromString, + ) + self.ListFeatures = channel.unary_stream( + '/routeguide.RouteGuide/ListFeatures', + request_serializer=Rectangle.SerializeToString, + response_deserializer=Feature.FromString, + ) + self.RecordRoute = channel.stream_unary( + '/routeguide.RouteGuide/RecordRoute', + request_serializer=Point.SerializeToString, + response_deserializer=RouteSummary.FromString, + ) + self.RouteChat = channel.stream_stream( + '/routeguide.RouteGuide/RouteChat', + request_serializer=RouteNote.SerializeToString, + response_deserializer=RouteNote.FromString, + ) + + + class RouteGuideServicer(object): + """Interface exported by the server. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def RecordRoute(self, request_iterator, context): - """A client-to-server streaming RPC. - Accepts a stream of Points on a route being traversed, returning a - RouteSummary when traversal is completed. + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + + def add_RouteGuideServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetFeature': grpc.unary_unary_rpc_method_handler( + servicer.GetFeature, + request_deserializer=Point.FromString, + response_serializer=Feature.SerializeToString, + ), + 'ListFeatures': grpc.unary_stream_rpc_method_handler( + servicer.ListFeatures, + request_deserializer=Rectangle.FromString, + response_serializer=Feature.SerializeToString, + ), + 'RecordRoute': grpc.stream_unary_rpc_method_handler( + servicer.RecordRoute, + request_deserializer=Point.FromString, + response_serializer=RouteSummary.SerializeToString, + ), + 'RouteChat': grpc.stream_stream_rpc_method_handler( + servicer.RouteChat, + request_deserializer=RouteNote.FromString, + response_serializer=RouteNote.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'routeguide.RouteGuide', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + class BetaRouteGuideServicer(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """Interface exported by the server. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def RouteChat(self, request_iterator, context): - """A Bidirectional streaming RPC. - - Accepts a stream of RouteNotes sent while a route is being traversed, - while receiving other RouteNotes (e.g. from other users). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_RouteGuideServicer_to_server(servicer, server): - rpc_method_handlers = { - 'GetFeature': grpc.unary_unary_rpc_method_handler( - servicer.GetFeature, - request_deserializer=Point.FromString, - response_serializer=Feature.SerializeToString, - ), - 'ListFeatures': grpc.unary_stream_rpc_method_handler( - servicer.ListFeatures, - request_deserializer=Rectangle.FromString, - response_serializer=Feature.SerializeToString, - ), - 'RecordRoute': grpc.stream_unary_rpc_method_handler( - servicer.RecordRoute, - request_deserializer=Point.FromString, - response_serializer=RouteSummary.SerializeToString, - ), - 'RouteChat': grpc.stream_stream_rpc_method_handler( - servicer.RouteChat, - request_deserializer=RouteNote.FromString, - response_serializer=RouteNote.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'routeguide.RouteGuide', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class BetaRouteGuideServicer(object): - """Interface exported by the server. - """ - def GetFeature(self, request, context): - """A simple RPC. - - Obtains the feature at a given position. - - A feature with an empty name is returned if there's no feature at the given - position. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def ListFeatures(self, request, context): - """A server-to-client streaming RPC. - - Obtains the Features available within the given Rectangle. Results are - streamed rather than returned at once (e.g. 
in a response message with a - repeated field), as the rectangle may cover a large area and contain a - huge number of features. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def RecordRoute(self, request_iterator, context): - """A client-to-server streaming RPC. - - Accepts a stream of Points on a route being traversed, returning a - RouteSummary when traversal is completed. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def RouteChat(self, request_iterator, context): - """A Bidirectional streaming RPC. - - Accepts a stream of RouteNotes sent while a route is being traversed, - while receiving other RouteNotes (e.g. from other users). - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - -class BetaRouteGuideStub(object): - """Interface exported by the server. - """ - def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """A simple RPC. - - Obtains the feature at a given position. - - A feature with an empty name is returned if there's no feature at the given - position. - """ - raise NotImplementedError() - GetFeature.future = None - def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """A server-to-client streaming RPC. - - Obtains the Features available within the given Rectangle. Results are - streamed rather than returned at once (e.g. in a response message with a - repeated field), as the rectangle may cover a large area and contain a - huge number of features. - """ - raise NotImplementedError() - def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): - """A client-to-server streaming RPC. - - Accepts a stream of Points on a route being traversed, returning a - RouteSummary when traversal is completed. - """ - raise NotImplementedError() - RecordRoute.future = None - def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): - """A Bidirectional streaming RPC. - - Accepts a stream of RouteNotes sent while a route is being traversed, - while receiving other RouteNotes (e.g. from other users). + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + + class BetaRouteGuideStub(object): + """The Beta API is deprecated for 0.15.0 and later. 
+ + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """Interface exported by the server. """ - raise NotImplementedError() - - -def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - request_deserializers = { - ('routeguide.RouteGuide', 'GetFeature'): Point.FromString, - ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString, - ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, - } - response_serializers = { - ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString, - ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString, - ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, - } - method_implementations = { - ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature), - ('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures), - ('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute), - ('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - -def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - request_serializers = { - ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString, - ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString, - ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, - } - response_deserializers = { - ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString, - ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString, - ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, - } - cardinalities = { - 'GetFeature': cardinality.Cardinality.UNARY_UNARY, - 'ListFeatures': cardinality.Cardinality.UNARY_STREAM, - 'RecordRoute': cardinality.Cardinality.STREAM_UNARY, - 'RouteChat': cardinality.Cardinality.STREAM_STREAM, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options) + def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. 
+ """ + raise NotImplementedError() + GetFeature.future = None + def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + raise NotImplementedError() + def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + raise NotImplementedError() + RecordRoute.future = None + def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + raise NotImplementedError() + + + def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_deserializers = { + ('routeguide.RouteGuide', 'GetFeature'): Point.FromString, + ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString, + ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, + } + response_serializers = { + ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString, + ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString, + ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, + } + method_implementations = { + ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature), + ('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures), + ('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute), + ('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + + def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. 
This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_serializers = { + ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString, + ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString, + ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, + } + response_deserializers = { + ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString, + ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString, + ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, + } + cardinalities = { + 'GetFeature': cardinality.Cardinality.UNARY_UNARY, + 'ListFeatures': cardinality.Cardinality.UNARY_STREAM, + 'RecordRoute': cardinality.Cardinality.STREAM_UNARY, + 'RouteChat': cardinality.Cardinality.STREAM_STREAM, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options) +except ImportError: + pass # @@protoc_insertion_point(module_scope) diff --git a/examples/python/multiplex/route_guide_pb2_grpc.py b/examples/python/multiplex/route_guide_pb2_grpc.py new file mode 100644 index 0000000000..27b24c747d --- /dev/null +++ b/examples/python/multiplex/route_guide_pb2_grpc.py @@ -0,0 +1,114 @@ +import grpc +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +import route_guide_pb2 as route__guide__pb2 + + +class RouteGuideStub(object): + """Interface exported by the server. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.GetFeature = channel.unary_unary( + '/routeguide.RouteGuide/GetFeature', + request_serializer=route__guide__pb2.Point.SerializeToString, + response_deserializer=route__guide__pb2.Feature.FromString, + ) + self.ListFeatures = channel.unary_stream( + '/routeguide.RouteGuide/ListFeatures', + request_serializer=route__guide__pb2.Rectangle.SerializeToString, + response_deserializer=route__guide__pb2.Feature.FromString, + ) + self.RecordRoute = channel.stream_unary( + '/routeguide.RouteGuide/RecordRoute', + request_serializer=route__guide__pb2.Point.SerializeToString, + response_deserializer=route__guide__pb2.RouteSummary.FromString, + ) + self.RouteChat = channel.stream_stream( + '/routeguide.RouteGuide/RouteChat', + request_serializer=route__guide__pb2.RouteNote.SerializeToString, + response_deserializer=route__guide__pb2.RouteNote.FromString, + ) + + +class RouteGuideServicer(object): + """Interface exported by the server. + """ + + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. 
in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_RouteGuideServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetFeature': grpc.unary_unary_rpc_method_handler( + servicer.GetFeature, + request_deserializer=route__guide__pb2.Point.FromString, + response_serializer=route__guide__pb2.Feature.SerializeToString, + ), + 'ListFeatures': grpc.unary_stream_rpc_method_handler( + servicer.ListFeatures, + request_deserializer=route__guide__pb2.Rectangle.FromString, + response_serializer=route__guide__pb2.Feature.SerializeToString, + ), + 'RecordRoute': grpc.stream_unary_rpc_method_handler( + servicer.RecordRoute, + request_deserializer=route__guide__pb2.Point.FromString, + response_serializer=route__guide__pb2.RouteSummary.SerializeToString, + ), + 'RouteChat': grpc.stream_stream_rpc_method_handler( + servicer.RouteChat, + request_deserializer=route__guide__pb2.RouteNote.FromString, + response_serializer=route__guide__pb2.RouteNote.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'routeguide.RouteGuide', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/examples/python/multiplex/run_codegen.py b/examples/python/multiplex/run_codegen.py index 7922a0f5c7..89ac9c8fae 100755..100644 --- a/examples/python/multiplex/run_codegen.py +++ b/examples/python/multiplex/run_codegen.py @@ -29,7 +29,7 @@ """Generates protocol messages and gRPC stubs.""" -from grpc.tools import protoc +from grpc_tools import protoc protoc.main( ( diff --git a/examples/python/route_guide/route_guide_client.py b/examples/python/route_guide/route_guide_client.py index 8a80ed892d..d2955231c3 100644 --- a/examples/python/route_guide/route_guide_client.py +++ b/examples/python/route_guide/route_guide_client.py @@ -37,6 +37,7 @@ import time import grpc import route_guide_pb2 +import route_guide_pb2_grpc import route_guide_resources @@ -116,7 +117,7 @@ def guide_route_chat(stub): def run(): channel = grpc.insecure_channel('localhost:50051') - stub = route_guide_pb2.RouteGuideStub(channel) + stub = route_guide_pb2_grpc.RouteGuideStub(channel) print("-------------- GetFeature --------------") guide_get_feature(stub) print("-------------- ListFeatures --------------") diff --git a/examples/python/route_guide/route_guide_pb2.py b/examples/python/route_guide/route_guide_pb2.py index 924e186e06..e6775eb814 100644 --- a/examples/python/route_guide/route_guide_pb2.py +++ b/examples/python/route_guide/route_guide_pb2.py @@ -277,240 +277,265 @@ 
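For reference, the GA-style client usage that the route_guide_client.py hunk above switches to looks roughly like the following. This is a minimal sketch, not part of the diff: it assumes the generated route_guide_pb2 and route_guide_pb2_grpc modules are importable and that a RouteGuide server is listening on localhost:50051; the coordinates are illustrative only.

    import grpc

    import route_guide_pb2
    import route_guide_pb2_grpc

    def get_one_feature():
        # Channels come from the GA grpc API; the stub now lives in the
        # generated *_pb2_grpc module rather than in *_pb2.
        channel = grpc.insecure_channel('localhost:50051')
        stub = route_guide_pb2_grpc.RouteGuideStub(channel)
        # Unary-unary RPC: GetFeature takes a Point and returns a Feature.
        point = route_guide_pb2.Point(latitude=409146138, longitude=-746188906)
        feature = stub.GetFeature(point)
        print(feature.name)

The beta helpers (beta_create_RouteGuide_stub and friends) kept in the *_pb2.py hunks remain, per their docstrings, only to ease the transition from grpcio<0.15.0.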
_sym_db.RegisterMessage(RouteSummary) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.routeguideB\017RouteGuideProtoP\001\242\002\003RTG')) -import grpc -from grpc.beta import implementations as beta_implementations -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - - -class RouteGuideStub(object): - """Interface exported by the server. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. +try: + # THESE ELEMENTS WILL BE DEPRECATED. + # Please use the generated *_pb2_grpc.py files instead. + import grpc + from grpc.framework.common import cardinality + from grpc.framework.interfaces.face import utilities as face_utilities + from grpc.beta import implementations as beta_implementations + from grpc.beta import interfaces as beta_interfaces + + + class RouteGuideStub(object): + """Interface exported by the server. """ - self.GetFeature = channel.unary_unary( - '/routeguide.RouteGuide/GetFeature', - request_serializer=Point.SerializeToString, - response_deserializer=Feature.FromString, - ) - self.ListFeatures = channel.unary_stream( - '/routeguide.RouteGuide/ListFeatures', - request_serializer=Rectangle.SerializeToString, - response_deserializer=Feature.FromString, - ) - self.RecordRoute = channel.stream_unary( - '/routeguide.RouteGuide/RecordRoute', - request_serializer=Point.SerializeToString, - response_deserializer=RouteSummary.FromString, - ) - self.RouteChat = channel.stream_stream( - '/routeguide.RouteGuide/RouteChat', - request_serializer=RouteNote.SerializeToString, - response_deserializer=RouteNote.FromString, - ) - - -class RouteGuideServicer(object): - """Interface exported by the server. - """ - - def GetFeature(self, request, context): - """A simple RPC. - - Obtains the feature at a given position. - - A feature with an empty name is returned if there's no feature at the given - position. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ListFeatures(self, request, context): - """A server-to-client streaming RPC. - Obtains the Features available within the given Rectangle. Results are - streamed rather than returned at once (e.g. in a response message with a - repeated field), as the rectangle may cover a large area and contain a - huge number of features. + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.GetFeature = channel.unary_unary( + '/routeguide.RouteGuide/GetFeature', + request_serializer=Point.SerializeToString, + response_deserializer=Feature.FromString, + ) + self.ListFeatures = channel.unary_stream( + '/routeguide.RouteGuide/ListFeatures', + request_serializer=Rectangle.SerializeToString, + response_deserializer=Feature.FromString, + ) + self.RecordRoute = channel.stream_unary( + '/routeguide.RouteGuide/RecordRoute', + request_serializer=Point.SerializeToString, + response_deserializer=RouteSummary.FromString, + ) + self.RouteChat = channel.stream_stream( + '/routeguide.RouteGuide/RouteChat', + request_serializer=RouteNote.SerializeToString, + response_deserializer=RouteNote.FromString, + ) + + + class RouteGuideServicer(object): + """Interface exported by the server. 
""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def RecordRoute(self, request_iterator, context): - """A client-to-server streaming RPC. - Accepts a stream of Points on a route being traversed, returning a - RouteSummary when traversal is completed. + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + + def add_RouteGuideServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetFeature': grpc.unary_unary_rpc_method_handler( + servicer.GetFeature, + request_deserializer=Point.FromString, + response_serializer=Feature.SerializeToString, + ), + 'ListFeatures': grpc.unary_stream_rpc_method_handler( + servicer.ListFeatures, + request_deserializer=Rectangle.FromString, + response_serializer=Feature.SerializeToString, + ), + 'RecordRoute': grpc.stream_unary_rpc_method_handler( + servicer.RecordRoute, + request_deserializer=Point.FromString, + response_serializer=RouteSummary.SerializeToString, + ), + 'RouteChat': grpc.stream_stream_rpc_method_handler( + servicer.RouteChat, + request_deserializer=RouteNote.FromString, + response_serializer=RouteNote.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'routeguide.RouteGuide', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + class BetaRouteGuideServicer(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """Interface exported by the server. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def RouteChat(self, request_iterator, context): - """A Bidirectional streaming RPC. 
- - Accepts a stream of RouteNotes sent while a route is being traversed, - while receiving other RouteNotes (e.g. from other users). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_RouteGuideServicer_to_server(servicer, server): - rpc_method_handlers = { - 'GetFeature': grpc.unary_unary_rpc_method_handler( - servicer.GetFeature, - request_deserializer=Point.FromString, - response_serializer=Feature.SerializeToString, - ), - 'ListFeatures': grpc.unary_stream_rpc_method_handler( - servicer.ListFeatures, - request_deserializer=Rectangle.FromString, - response_serializer=Feature.SerializeToString, - ), - 'RecordRoute': grpc.stream_unary_rpc_method_handler( - servicer.RecordRoute, - request_deserializer=Point.FromString, - response_serializer=RouteSummary.SerializeToString, - ), - 'RouteChat': grpc.stream_stream_rpc_method_handler( - servicer.RouteChat, - request_deserializer=RouteNote.FromString, - response_serializer=RouteNote.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'routeguide.RouteGuide', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class BetaRouteGuideServicer(object): - """Interface exported by the server. - """ - def GetFeature(self, request, context): - """A simple RPC. - - Obtains the feature at a given position. - - A feature with an empty name is returned if there's no feature at the given - position. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def ListFeatures(self, request, context): - """A server-to-client streaming RPC. - - Obtains the Features available within the given Rectangle. Results are - streamed rather than returned at once (e.g. in a response message with a - repeated field), as the rectangle may cover a large area and contain a - huge number of features. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def RecordRoute(self, request_iterator, context): - """A client-to-server streaming RPC. - - Accepts a stream of Points on a route being traversed, returning a - RouteSummary when traversal is completed. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def RouteChat(self, request_iterator, context): - """A Bidirectional streaming RPC. - - Accepts a stream of RouteNotes sent while a route is being traversed, - while receiving other RouteNotes (e.g. from other users). - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - -class BetaRouteGuideStub(object): - """Interface exported by the server. - """ - def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """A simple RPC. - - Obtains the feature at a given position. - - A feature with an empty name is returned if there's no feature at the given - position. - """ - raise NotImplementedError() - GetFeature.future = None - def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """A server-to-client streaming RPC. - - Obtains the Features available within the given Rectangle. Results are - streamed rather than returned at once (e.g. in a response message with a - repeated field), as the rectangle may cover a large area and contain a - huge number of features. - """ - raise NotImplementedError() - def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): - """A client-to-server streaming RPC. 
- - Accepts a stream of Points on a route being traversed, returning a - RouteSummary when traversal is completed. - """ - raise NotImplementedError() - RecordRoute.future = None - def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): - """A Bidirectional streaming RPC. - - Accepts a stream of RouteNotes sent while a route is being traversed, - while receiving other RouteNotes (e.g. from other users). + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + + class BetaRouteGuideStub(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """Interface exported by the server. 
""" - raise NotImplementedError() - - -def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - request_deserializers = { - ('routeguide.RouteGuide', 'GetFeature'): Point.FromString, - ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString, - ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, - } - response_serializers = { - ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString, - ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString, - ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, - } - method_implementations = { - ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature), - ('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures), - ('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute), - ('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - -def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - request_serializers = { - ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString, - ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString, - ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, - } - response_deserializers = { - ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString, - ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString, - ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, - } - cardinalities = { - 'GetFeature': cardinality.Cardinality.UNARY_UNARY, - 'ListFeatures': cardinality.Cardinality.UNARY_STREAM, - 'RecordRoute': cardinality.Cardinality.STREAM_UNARY, - 'RouteChat': cardinality.Cardinality.STREAM_STREAM, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options) + def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + raise NotImplementedError() + GetFeature.future = None + def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. 
+ """ + raise NotImplementedError() + def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + raise NotImplementedError() + RecordRoute.future = None + def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + raise NotImplementedError() + + + def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_deserializers = { + ('routeguide.RouteGuide', 'GetFeature'): Point.FromString, + ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString, + ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, + } + response_serializers = { + ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString, + ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString, + ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, + } + method_implementations = { + ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature), + ('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures), + ('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute), + ('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + + def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. 
This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_serializers = { + ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString, + ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString, + ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, + } + response_deserializers = { + ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString, + ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString, + ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, + } + cardinalities = { + 'GetFeature': cardinality.Cardinality.UNARY_UNARY, + 'ListFeatures': cardinality.Cardinality.UNARY_STREAM, + 'RecordRoute': cardinality.Cardinality.STREAM_UNARY, + 'RouteChat': cardinality.Cardinality.STREAM_STREAM, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options) +except ImportError: + pass # @@protoc_insertion_point(module_scope) diff --git a/examples/python/route_guide/route_guide_pb2_grpc.py b/examples/python/route_guide/route_guide_pb2_grpc.py new file mode 100644 index 0000000000..27b24c747d --- /dev/null +++ b/examples/python/route_guide/route_guide_pb2_grpc.py @@ -0,0 +1,114 @@ +import grpc +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +import route_guide_pb2 as route__guide__pb2 + + +class RouteGuideStub(object): + """Interface exported by the server. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.GetFeature = channel.unary_unary( + '/routeguide.RouteGuide/GetFeature', + request_serializer=route__guide__pb2.Point.SerializeToString, + response_deserializer=route__guide__pb2.Feature.FromString, + ) + self.ListFeatures = channel.unary_stream( + '/routeguide.RouteGuide/ListFeatures', + request_serializer=route__guide__pb2.Rectangle.SerializeToString, + response_deserializer=route__guide__pb2.Feature.FromString, + ) + self.RecordRoute = channel.stream_unary( + '/routeguide.RouteGuide/RecordRoute', + request_serializer=route__guide__pb2.Point.SerializeToString, + response_deserializer=route__guide__pb2.RouteSummary.FromString, + ) + self.RouteChat = channel.stream_stream( + '/routeguide.RouteGuide/RouteChat', + request_serializer=route__guide__pb2.RouteNote.SerializeToString, + response_deserializer=route__guide__pb2.RouteNote.FromString, + ) + + +class RouteGuideServicer(object): + """Interface exported by the server. + """ + + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. 
in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_RouteGuideServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetFeature': grpc.unary_unary_rpc_method_handler( + servicer.GetFeature, + request_deserializer=route__guide__pb2.Point.FromString, + response_serializer=route__guide__pb2.Feature.SerializeToString, + ), + 'ListFeatures': grpc.unary_stream_rpc_method_handler( + servicer.ListFeatures, + request_deserializer=route__guide__pb2.Rectangle.FromString, + response_serializer=route__guide__pb2.Feature.SerializeToString, + ), + 'RecordRoute': grpc.stream_unary_rpc_method_handler( + servicer.RecordRoute, + request_deserializer=route__guide__pb2.Point.FromString, + response_serializer=route__guide__pb2.RouteSummary.SerializeToString, + ), + 'RouteChat': grpc.stream_stream_rpc_method_handler( + servicer.RouteChat, + request_deserializer=route__guide__pb2.RouteNote.FromString, + response_serializer=route__guide__pb2.RouteNote.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'routeguide.RouteGuide', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/examples/python/route_guide/route_guide_server.py b/examples/python/route_guide/route_guide_server.py index 3ffe678476..bf49217932 100644 --- a/examples/python/route_guide/route_guide_server.py +++ b/examples/python/route_guide/route_guide_server.py @@ -36,6 +36,7 @@ import math import grpc import route_guide_pb2 +import route_guide_pb2_grpc import route_guide_resources _ONE_DAY_IN_SECONDS = 60 * 60 * 24 @@ -68,7 +69,7 @@ def get_distance(start, end): R = 6371000; # metres return R * c; -class RouteGuideServicer(route_guide_pb2.RouteGuideServicer): +class RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer): """Provides methods that implement functionality of route guide server.""" def __init__(self): @@ -125,7 +126,7 @@ class RouteGuideServicer(route_guide_pb2.RouteGuideServicer): def serve(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) - route_guide_pb2.add_RouteGuideServicer_to_server( + route_guide_pb2_grpc.add_RouteGuideServicer_to_server( RouteGuideServicer(), server) server.add_insecure_port('[::]:50051') server.start() diff --git a/examples/python/route_guide/run_codegen.py b/examples/python/route_guide/run_codegen.py index c7c6008580..3751e019c9 100644 --- a/examples/python/route_guide/run_codegen.py +++ b/examples/python/route_guide/run_codegen.py @@ -29,7 +29,7 @@ """Runs protoc with the gRPC plugin to generate messages and gRPC stubs.""" 
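Taken together, the server-side hunks above (the new route_guide_pb2_grpc.py plus the route_guide_server.py changes) amount to the following GA-style pattern. This is a hedged sketch, not the example's full implementation: it assumes the generated modules are importable, and the GetFeature body is a placeholder for the real lookup against route_guide_resources.

    import time
    from concurrent import futures

    import grpc

    import route_guide_pb2
    import route_guide_pb2_grpc

    class RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer):
        # Subclasses the servicer from *_pb2_grpc instead of route_guide_pb2.RouteGuideServicer.

        def GetFeature(self, request, context):
            # Placeholder lookup: echo back the requested location with no name.
            return route_guide_pb2.Feature(name='', location=request)

    def serve():
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        # The registration helper also moved to the *_pb2_grpc module.
        route_guide_pb2_grpc.add_RouteGuideServicer_to_server(RouteGuideServicer(), server)
        server.add_insecure_port('[::]:50051')
        server.start()
        try:
            while True:
                time.sleep(60 * 60 * 24)
        except KeyboardInterrupt:
            server.stop(0)

After editing route_guide.proto, both generated modules are regenerated with the run_codegen.py script whose docstring appears just above; note that it now imports protoc from grpc_tools rather than grpc.tools.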
-from grpc.tools import protoc +from grpc_tools import protoc protoc.main( ( diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 04f7211d21..e10e534a5e 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -35,7 +35,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-Core' - version = '1.0.1' + version = '1.0.2' s.version = version s.summary = 'Core cross-platform gRPC library, written in C' s.homepage = 'http://www.grpc.io' @@ -44,7 +44,9 @@ Pod::Spec.new do |s| s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "v#{version}", + # TODO(mxyan): Change back to "v#{version}" for next release + #:tag => "v#{version}", + :tag => "objective-c-v#{version}", # TODO(jcanizales): Depend explicitly on the nanopb pod, and disable submodules. :submodules => true, } diff --git a/gRPC-ProtoRPC.podspec b/gRPC-ProtoRPC.podspec index 61d4b62d39..62eaa2aaf7 100644 --- a/gRPC-ProtoRPC.podspec +++ b/gRPC-ProtoRPC.podspec @@ -30,7 +30,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-ProtoRPC' - version = '1.0.1' + version = '1.0.2' s.version = version s.summary = 'RPC library for Protocol Buffers, based on gRPC' s.homepage = 'http://www.grpc.io' @@ -39,7 +39,7 @@ Pod::Spec.new do |s| s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "v#{version}", + :tag => "objective-c-v#{version}", } s.ios.deployment_target = '7.1' diff --git a/gRPC-RxLibrary.podspec b/gRPC-RxLibrary.podspec index d59385c039..2e8fffd2f1 100644 --- a/gRPC-RxLibrary.podspec +++ b/gRPC-RxLibrary.podspec @@ -30,7 +30,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-RxLibrary' - version = '1.0.1' + version = '1.0.2' s.version = version s.summary = 'Reactive Extensions library for iOS/OSX.' s.homepage = 'http://www.grpc.io' @@ -39,7 +39,7 @@ Pod::Spec.new do |s| s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "v#{version}", + :tag => "objective-c-v#{version}", } s.ios.deployment_target = '7.1' diff --git a/gRPC.podspec b/gRPC.podspec index 76410b17d2..e8b7709449 100644 --- a/gRPC.podspec +++ b/gRPC.podspec @@ -30,7 +30,7 @@ Pod::Spec.new do |s| s.name = 'gRPC' - version = '1.0.1' + version = '1.0.2' s.version = version s.summary = 'gRPC client library for iOS/OSX' s.homepage = 'http://www.grpc.io' @@ -39,7 +39,7 @@ Pod::Spec.new do |s| s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "v#{version}", + :tag => "objective-c-v#{version}", } s.ios.deployment_target = '7.1' diff --git a/grpc.gemspec b/grpc.gemspec index 6019b97f67..9cafd1f2f9 100755 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -27,7 +27,7 @@ Gem::Specification.new do |s| s.require_paths = %w( src/ruby/bin src/ruby/lib src/ruby/pb ) s.platform = Gem::Platform::RUBY - s.add_dependency 'google-protobuf', '~> 3.0.2' + s.add_dependency 'google-protobuf', '~> 3.1.0' s.add_dependency 'googleauth', '~> 0.5.1' s.add_development_dependency 'bundler', '~> 1.9' diff --git a/include/grpc++/impl/codegen/completion_queue.h b/include/grpc++/impl/codegen/completion_queue.h index ef00163b7e..944f2c3919 100644 --- a/include/grpc++/impl/codegen/completion_queue.h +++ b/include/grpc++/impl/codegen/completion_queue.h @@ -52,6 +52,7 @@ #include <grpc++/impl/codegen/grpc_library.h> #include <grpc++/impl/codegen/status.h> #include <grpc++/impl/codegen/time.h> +#include <grpc/impl/codegen/atm.h> struct grpc_completion_queue; @@ -101,6 +102,7 @@ class CompletionQueue : private GrpcLibraryCodegen { /// instance. 
CompletionQueue() { cq_ = g_core_codegen_interface->grpc_completion_queue_create(nullptr); + InitialAvalanching(); // reserve this for the future shutdown } /// Wrap \a take, taking ownership of the instance. @@ -151,7 +153,8 @@ class CompletionQueue : private GrpcLibraryCodegen { /// Request the shutdown of the queue. /// - /// \warning This method must be called at some point. Once invoked, \a Next + /// \warning This method must be called at some point if this completion queue + /// is accessed with Next or AsyncNext. Once invoked, \a Next /// will start to return false and \a AsyncNext will return \a /// NextStatus::SHUTDOWN. Only once either one of these methods does that /// (that is, once the queue has been \em drained) can an instance of this @@ -165,6 +168,21 @@ class CompletionQueue : private GrpcLibraryCodegen { /// owership is performed. grpc_completion_queue* cq() { return cq_; } + /// Manage state of avalanching operations : completion queue tags that + /// trigger other completion queue operations. The underlying core completion + /// queue should not really shutdown until all avalanching operations have + /// been finalized. Note that we maintain the requirement that an avalanche + /// registration must take place before CQ shutdown (which must be maintained + /// elsehwere) + void InitialAvalanching() { + gpr_atm_rel_store(&avalanches_in_flight_, static_cast<gpr_atm>(1)); + } + void RegisterAvalanching() { + gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_, + static_cast<gpr_atm>(1)); + }; + void CompleteAvalanching(); + private: // Friend synchronous wrappers so that they can access Pluck(), which is // a semi-private API geared towards the synchronous implementation. @@ -229,6 +247,8 @@ class CompletionQueue : private GrpcLibraryCodegen { } grpc_completion_queue* cq_; // owned + + gpr_atm avalanches_in_flight_; }; /// A specific type of completion queue used by the processing of notifications diff --git a/include/grpc++/impl/codegen/server_interface.h b/include/grpc++/impl/codegen/server_interface.h index 41a64bead0..666b9ff66e 100644 --- a/include/grpc++/impl/codegen/server_interface.h +++ b/include/grpc++/impl/codegen/server_interface.h @@ -140,7 +140,7 @@ class ServerInterface : public CallHook { ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag, bool delete_on_finalize); - virtual ~BaseAsyncRequest() {} + virtual ~BaseAsyncRequest(); bool FinalizeResult(void** tag, bool* status) override; diff --git a/include/grpc++/resource_quota.h b/include/grpc++/resource_quota.h index 75e04d4e2f..68a514658d 100644 --- a/include/grpc++/resource_quota.h +++ b/include/grpc++/resource_quota.h @@ -37,6 +37,7 @@ struct grpc_resource_quota; #include <grpc++/impl/codegen/config.h> +#include <grpc++/impl/codegen/grpc_library.h> namespace grpc { @@ -44,7 +45,7 @@ namespace grpc { /// A ResourceQuota can be attached to a server (via ServerBuilder), or a client /// channel (via ChannelArguments). gRPC will attempt to keep memory used by /// all attached entities below the ResourceQuota bound. 
-class ResourceQuota final { +class ResourceQuota final : private GrpcLibraryCodegen { public: explicit ResourceQuota(const grpc::string& name); ResourceQuota(); diff --git a/include/grpc++/server_builder.h b/include/grpc++/server_builder.h index 9252c6a63a..2ac2f0a1ef 100644 --- a/include/grpc++/server_builder.h +++ b/include/grpc++/server_builder.h @@ -187,7 +187,7 @@ class ServerBuilder { struct SyncServerSettings { SyncServerSettings() - : num_cqs(GPR_MAX(gpr_cpu_num_cores(), 4)), + : num_cqs(1), min_pollers(1), max_pollers(INT_MAX), cq_timeout_msec(1000) {} diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h index 5e486215e0..898f4d533b 100644 --- a/include/grpc/grpc.h +++ b/include/grpc/grpc.h @@ -202,9 +202,15 @@ GRPCAPI grpc_call *grpc_channel_create_registered_call( completion of type 'tag' to the completion queue bound to the call. The order of ops specified in the batch has no significance. Only one operation of each type can be active at once in any given - batch. You must call grpc_completion_queue_next or - grpc_completion_queue_pluck on the completion queue associated with 'call' - for work to be performed. + batch. + If a call to grpc_call_start_batch returns GRPC_CALL_OK you must call + grpc_completion_queue_next or grpc_completion_queue_pluck on the completion + queue associated with 'call' for work to be performed. If a call to + grpc_call_start_batch returns any value other than GRPC_CALL_OK it is + guaranteed that no state associated with 'call' is changed and it is not + appropriate to call grpc_completion_queue_next or + grpc_completion_queue_pluck consequent to the failed grpc_call_start_batch + call. THREAD SAFETY: access to grpc_call_start_batch in multi-threaded environment needs to be synchronized. As an optimization, you may synchronize batches containing just send operations independently from batches containing just diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h index da1bd9dcbf..4471ccf745 100644 --- a/include/grpc/impl/codegen/grpc_types.h +++ b/include/grpc/impl/codegen/grpc_types.h @@ -206,18 +206,12 @@ typedef struct { /** If non-zero, allow the use of SO_REUSEPORT if it's available (default 1) */ #define GRPC_ARG_ALLOW_REUSEPORT "grpc.so_reuseport" /** If non-zero, a pointer to a buffer pool (use grpc_resource_quota_arg_vtable - to fetch an appropriate pointer arg vtable */ + to fetch an appropriate pointer arg vtable) */ #define GRPC_ARG_RESOURCE_QUOTA "grpc.resource_quota" -/** Service config data, to be passed to subchannels. - Not intended for external use. */ +/** Service config data in JSON form. Not intended for use outside of tests. */ #define GRPC_ARG_SERVICE_CONFIG "grpc.service_config" /** LB policy name. */ #define GRPC_ARG_LB_POLICY_NAME "grpc.lb_policy_name" -/** Server name. Not intended for external use. */ -#define GRPC_ARG_SERVER_NAME "grpc.server_name" -/** Resolved addresses in a form used by the LB policy. - Not intended for external use. */ -#define GRPC_ARG_LB_ADDRESSES "grpc.lb_addresses" /** The grpc_socket_mutator instance that set the socket options. A pointer. */ #define GRPC_ARG_SOCKET_MUTATOR "grpc.socket_mutator" /** \} */ @@ -218,15 +218,18 @@ SETUP_REQUIRES = INSTALL_REQUIRES + ( 'six>=1.10', ) if ENABLE_DOCUMENTATION_BUILD else () -if BUILD_WITH_CYTHON: - sys.stderr.write( - "You requested a Cython build via GRPC_PYTHON_BUILD_WITH_CYTHON, " - "but do not have Cython installed. 
We won't stop you from using " - "other commands, but the extension files will fail to build.\n") -elif need_cython: - sys.stderr.write( - 'We could not find Cython. Setup may take 10-20 minutes.\n') - SETUP_REQUIRES += ('cython>=0.23',) +try: + import Cython +except ImportError: + if BUILD_WITH_CYTHON: + sys.stderr.write( + "You requested a Cython build via GRPC_PYTHON_BUILD_WITH_CYTHON, " + "but do not have Cython installed. We won't stop you from using " + "other commands, but the extension files will fail to build.\n") + elif need_cython: + sys.stderr.write( + 'We could not find Cython. Setup may take 10-20 minutes.\n') + SETUP_REQUIRES += ('cython>=0.23',) COMMAND_CLASS = { 'doc': commands.SphinxDocumentation, diff --git a/src/google_benchmark/gen_build_yaml.py b/src/benchmark/gen_build_yaml.py index 302e08737a..09b76115a8 100755 --- a/src/google_benchmark/gen_build_yaml.py +++ b/src/benchmark/gen_build_yaml.py @@ -39,15 +39,15 @@ os.chdir(os.path.dirname(sys.argv[0])+'/../..') out = {} out['libs'] = [{ - 'name': 'google_benchmark', + 'name': 'benchmark', 'build': 'private', 'language': 'c++', 'secure': 'no', - 'defaults': 'google_benchmark', - 'src': sorted(glob.glob('third_party/google_benchmark/src/*.cc')), + 'defaults': 'benchmark', + 'src': sorted(glob.glob('third_party/benchmark/src/*.cc')), 'headers': sorted( - glob.glob('third_party/google_benchmark/src/*.h') + - glob.glob('third_party/google_benchmark/include/benchmark/*.h')), + glob.glob('third_party/benchmark/src/*.h') + + glob.glob('third_party/benchmark/include/benchmark/*.h')), }] print yaml.dump(out) diff --git a/src/compiler/csharp_generator.cc b/src/compiler/csharp_generator.cc index a3af258d9c..cc7a7a96ae 100644 --- a/src/compiler/csharp_generator.cc +++ b/src/compiler/csharp_generator.cc @@ -68,13 +68,13 @@ namespace { // Currently, we cannot easily reuse the functionality as // google/protobuf/compiler/csharp/csharp_doc_comment.h is not a public header. // TODO(jtattermusch): reuse the functionality from google/protobuf. -void GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer *printer, +bool GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer *printer, grpc::protobuf::SourceLocation location) { grpc::string comments = location.leading_comments.empty() ? location.trailing_comments : location.leading_comments; if (comments.empty()) { - return; + return false; } // XML escaping... 
no need for apostrophes etc as the whole text is going to // be a child @@ -107,18 +107,84 @@ void GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer *printer, printer->Print("///\n"); } last_was_empty = false; - printer->Print("/// $line$\n", "line", *it); + printer->Print("///$line$\n", "line", *it); } } printer->Print("/// </summary>\n"); + return true; } template <typename DescriptorType> -void GenerateDocCommentBody(grpc::protobuf::io::Printer *printer, +bool GenerateDocCommentBody(grpc::protobuf::io::Printer *printer, const DescriptorType *descriptor) { grpc::protobuf::SourceLocation location; - if (descriptor->GetSourceLocation(&location)) { - GenerateDocCommentBodyImpl(printer, location); + if (!descriptor->GetSourceLocation(&location)) { + return false; + } + return GenerateDocCommentBodyImpl(printer, location); +} + +void GenerateDocCommentServerMethod(grpc::protobuf::io::Printer *printer, + const MethodDescriptor *method) { + if (GenerateDocCommentBody(printer, method)) { + if (method->client_streaming()) { + printer->Print( + "/// <param name=\"requestStream\">Used for reading requests from " + "the client.</param>\n"); + } else { + printer->Print( + "/// <param name=\"request\">The request received from the " + "client.</param>\n"); + } + if (method->server_streaming()) { + printer->Print( + "/// <param name=\"responseStream\">Used for sending responses back " + "to the client.</param>\n"); + } + printer->Print( + "/// <param name=\"context\">The context of the server-side call " + "handler being invoked.</param>\n"); + if (method->server_streaming()) { + printer->Print( + "/// <returns>A task indicating completion of the " + "handler.</returns>\n"); + } else { + printer->Print( + "/// <returns>The response to send back to the client (wrapped by a " + "task).</returns>\n"); + } + } +} + +void GenerateDocCommentClientMethod(grpc::protobuf::io::Printer *printer, + const MethodDescriptor *method, + bool is_sync, bool use_call_options) { + if (GenerateDocCommentBody(printer, method)) { + if (!method->client_streaming()) { + printer->Print( + "/// <param name=\"request\">The request to send to the " + "server.</param>\n"); + } + if (!use_call_options) { + printer->Print( + "/// <param name=\"headers\">The initial metadata to send with the " + "call. This parameter is optional.</param>\n"); + printer->Print( + "/// <param name=\"deadline\">An optional deadline for the call. 
The " + "call will be cancelled if deadline is hit.</param>\n"); + printer->Print( + "/// <param name=\"cancellationToken\">An optional token for " + "canceling the call.</param>\n"); + } else { + printer->Print( + "/// <param name=\"options\">The options for the call.</param>\n"); + } + if (is_sync) { + printer->Print( + "/// <returns>The response received from the server.</returns>\n"); + } else { + printer->Print("/// <returns>The call object.</returns>\n"); + } } } @@ -319,7 +385,7 @@ void GenerateServerClass(Printer *out, const ServiceDescriptor *service) { out->Indent(); for (int i = 0; i < service->method_count(); i++) { const MethodDescriptor *method = service->method(i); - GenerateDocCommentBody(out, method); + GenerateDocCommentServerMethod(out, method); out->Print( "public virtual $returntype$ " "$methodname$($request$$response_stream_maybe$, " @@ -393,7 +459,7 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) { if (method_type == METHODTYPE_NO_STREAMING) { // unary calls have an extra synchronous stub method - GenerateDocCommentBody(out, method); + GenerateDocCommentClientMethod(out, method, true, false); out->Print( "public virtual $response$ $methodname$($request$ request, Metadata " "headers = null, DateTime? deadline = null, CancellationToken " @@ -411,7 +477,7 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) { out->Print("}\n"); // overload taking CallOptions as a param - GenerateDocCommentBody(out, method); + GenerateDocCommentClientMethod(out, method, true, true); out->Print( "public virtual $response$ $methodname$($request$ request, " "CallOptions options)\n", @@ -432,7 +498,7 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) { if (method_type == METHODTYPE_NO_STREAMING) { method_name += "Async"; // prevent name clash with synchronous method. } - GenerateDocCommentBody(out, method); + GenerateDocCommentClientMethod(out, method, false, false); out->Print( "public virtual $returntype$ $methodname$($request_maybe$Metadata " "headers = null, DateTime? 
deadline = null, CancellationToken " @@ -452,7 +518,7 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) { out->Print("}\n"); // overload taking CallOptions as a param - GenerateDocCommentBody(out, method); + GenerateDocCommentClientMethod(out, method, false, true); out->Print( "public virtual $returntype$ $methodname$($request_maybe$CallOptions " "options)\n", @@ -518,6 +584,9 @@ void GenerateBindServiceMethod(Printer *out, const ServiceDescriptor *service) { "/// <summary>Creates service definition that can be registered with a " "server</summary>\n"); out->Print( + "/// <param name=\"serviceImpl\">An object implementing the server-side" + " handling logic.</param>\n"); + out->Print( "public static ServerServiceDefinition BindService($implclass$ " "serviceImpl)\n", "implclass", GetServerClassName(service)); diff --git a/src/compiler/python_generator.cc b/src/compiler/python_generator.cc index b0a60092ab..4841da8da8 100644 --- a/src/compiler/python_generator.cc +++ b/src/compiler/python_generator.cc @@ -40,6 +40,7 @@ #include <map> #include <memory> #include <ostream> +#include <set> #include <sstream> #include <tuple> #include <vector> @@ -64,7 +65,9 @@ using std::make_pair; using std::map; using std::pair; using std::replace; +using std::tuple; using std::vector; +using std::set; namespace grpc_python_generator { @@ -73,6 +76,8 @@ namespace { typedef vector<const Descriptor*> DescriptorVector; typedef map<grpc::string, grpc::string> StringMap; typedef vector<grpc::string> StringVector; +typedef tuple<grpc::string, grpc::string> StringPair; +typedef set<StringPair> StringPairSet; // Provides RAII indentation handling. Use as: // { @@ -651,6 +656,7 @@ bool PrivateGenerator::PrintPreamble() { "face_utilities\n"); if (generate_in_pb2_grpc) { out->Print("\n"); + StringPairSet imports_set; for (int i = 0; i < file->service_count(); ++i) { const ServiceDescriptor* service = file->service(i); for (int j = 0; j < service->method_count(); ++j) { @@ -662,11 +668,15 @@ bool PrivateGenerator::PrintPreamble() { grpc::string type_file_name = type->file()->name(); grpc::string module_name = ModuleName(type_file_name); grpc::string module_alias = ModuleAlias(type_file_name); - out->Print("import $ModuleName$ as $ModuleAlias$\n", "ModuleName", - module_name, "ModuleAlias", module_alias); + imports_set.insert(std::make_tuple(module_name, module_alias)); } } } + for (StringPairSet::iterator it = imports_set.begin(); + it != imports_set.end(); ++it) { + out->Print("import $ModuleName$ as $ModuleAlias$\n", "ModuleName", + std::get<0>(*it), "ModuleAlias", std::get<1>(*it)); + } } return true; } @@ -714,6 +724,9 @@ pair<bool, grpc::string> PrivateGenerator::GetGrpcServices() { out = &out_printer; if (generate_in_pb2_grpc) { + out->Print( + "# Generated by the gRPC Python protocol compiler plugin. " + "DO NOT EDIT!\n"); if (!PrintPreamble()) { return make_pair(false, ""); } diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c index 397dbc40a8..8e4d4732b8 100644 --- a/src/core/ext/census/grpc_filter.c +++ b/src/core/ext/census/grpc_filter.c @@ -154,7 +154,8 @@ static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx, memset(d, 0, sizeof(*d)); d->start_ts = args->start_time; /* TODO(hongyu): call census_tracing_start_op here. 
*/ - grpc_closure_init(&d->finish_recv, server_on_done_recv, elem); + grpc_closure_init(&d->finish_recv, server_on_done_recv, elem, + grpc_schedule_on_exec_ctx); return GRPC_ERROR_NONE; } @@ -167,11 +168,12 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx, /* TODO(hongyu): record rpc server stats and census_tracing_end_op here */ } -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { channel_data *chand = elem->channel_data; GPR_ASSERT(chand != NULL); + return GRPC_ERROR_NONE; } static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, diff --git a/src/core/ext/client_channel/channel_connectivity.c b/src/core/ext/client_channel/channel_connectivity.c index 9797e66564..b10f444b63 100644 --- a/src/core/ext/client_channel/channel_connectivity.c +++ b/src/core/ext/client_channel/channel_connectivity.c @@ -198,7 +198,8 @@ void grpc_channel_watch_connectivity_state( grpc_cq_begin_op(cq, tag); gpr_mu_init(&w->mu); - grpc_closure_init(&w->on_complete, watch_complete, w); + grpc_closure_init(&w->on_complete, watch_complete, w, + grpc_schedule_on_exec_ctx); w->phase = WAITING; w->state = last_observed_state; w->cq = cq; diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index 1fcff4388a..a762b8917e 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -44,6 +44,7 @@ #include <grpc/support/useful.h> #include "src/core/ext/client_channel/lb_policy_registry.h" +#include "src/core/ext/client_channel/resolver_registry.h" #include "src/core/ext/client_channel/subchannel.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/connected_channel.h" @@ -248,7 +249,8 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand, GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy"); w->chand = chand; - grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w); + grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w, + grpc_schedule_on_exec_ctx); w->state = current_state; w->lb_policy = lb_policy; grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state, @@ -360,14 +362,12 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg, } chand->method_params_table = method_params_table; if (lb_policy != NULL) { - grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures, - NULL); + grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures); } else if (chand->resolver == NULL /* disconnected */) { grpc_closure_list_fail_all( &chand->waiting_for_config_closures, GRPC_ERROR_CREATE_REFERENCING("Channel disconnected", &error, 1)); - grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures, - NULL); + grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures); } if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) { GRPC_LB_POLICY_REF(lb_policy, "exit_idle"); @@ -424,7 +424,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport_op *op) { channel_data *chand = elem->channel_data; - grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); GPR_ASSERT(op->set_accept_stream == false); if (op->bind_pollset != NULL) { @@ -443,9 +443,8 @@ static 
void cc_start_transport_op(grpc_exec_ctx *exec_ctx, if (op->send_ping != NULL) { if (chand->lb_policy == NULL) { - grpc_exec_ctx_sched(exec_ctx, op->send_ping, - GRPC_ERROR_CREATE("Ping with no load balancing"), - NULL); + grpc_closure_sched(exec_ctx, op->send_ping, + GRPC_ERROR_CREATE("Ping with no load balancing")); } else { grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping); op->bind_pollset = NULL; @@ -464,8 +463,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx, if (!chand->started_resolving) { grpc_closure_list_fail_all(&chand->waiting_for_config_closures, GRPC_ERROR_REF(op->disconnect_with_error)); - grpc_exec_ctx_enqueue_list(exec_ctx, - &chand->waiting_for_config_closures, NULL); + grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures); } if (chand->lb_policy != NULL) { grpc_pollset_set_del_pollset_set(exec_ctx, @@ -499,24 +497,40 @@ static void cc_get_channel_info(grpc_exec_ctx *exec_ctx, } /* Constructor for channel_data */ -static void cc_init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { channel_data *chand = elem->channel_data; - memset(chand, 0, sizeof(*chand)); - GPR_ASSERT(args->is_last); GPR_ASSERT(elem->filter == &grpc_client_channel_filter); - + // Initialize data members. gpr_mu_init(&chand->mu); - grpc_closure_init(&chand->on_resolver_result_changed, - on_resolver_result_changed, chand); chand->owning_stack = args->channel_stack; - + grpc_closure_init(&chand->on_resolver_result_changed, + on_resolver_result_changed, chand, + grpc_schedule_on_exec_ctx); + chand->interested_parties = grpc_pollset_set_create(); grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE, "client_channel"); - chand->interested_parties = grpc_pollset_set_create(); + // Record client channel factory. + const grpc_arg *arg = grpc_channel_args_find(args->channel_args, + GRPC_ARG_CLIENT_CHANNEL_FACTORY); + GPR_ASSERT(arg != NULL); + GPR_ASSERT(arg->type == GRPC_ARG_POINTER); + grpc_client_channel_factory_ref(arg->value.pointer.p); + chand->client_channel_factory = arg->value.pointer.p; + // Instantiate resolver. 
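Note on the hunk above: the client channel now receives its factory and target through channel args instead of a post-construction initializer. A hedged sketch of the producer side, assuming a hypothetical helper and caller-supplied base_args/factory/target; grpc_client_channel_factory_create_channel_arg is the function introduced further down in this patch:

    static grpc_channel_args *add_client_channel_args(
        const grpc_channel_args *base_args,
        grpc_client_channel_factory *factory, const char *target) {
      grpc_arg to_add[2];
      /* Pointer arg carrying the factory (refcounted via its vtable). */
      to_add[0] = grpc_client_channel_factory_create_channel_arg(factory);
      /* String arg carrying the server URI consumed by cc_init_channel_elem. */
      to_add[1].type = GRPC_ARG_STRING;
      to_add[1].key = GRPC_ARG_SERVER_URI;
      to_add[1].value.string = (char *)target;
      return grpc_channel_args_copy_and_add(base_args, to_add, 2);
    }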
+ arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI); + GPR_ASSERT(arg != NULL); + GPR_ASSERT(arg->type == GRPC_ARG_STRING); + chand->resolver = + grpc_resolver_create(exec_ctx, arg->value.string, args->channel_args, + chand->interested_parties); + if (chand->resolver == NULL) { + return GRPC_ERROR_CREATE("resolver creation failed"); + } + return GRPC_ERROR_NONE; } /* Destructor for channel_data */ @@ -662,8 +676,9 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) { calld->waiting_ops_count = 0; calld->waiting_ops_capacity = 0; GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops"); - grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(retry_ops, a), - GRPC_ERROR_NONE, NULL); + grpc_closure_sched( + exec_ctx, grpc_closure_create(retry_ops, a, grpc_schedule_on_exec_ctx), + GRPC_ERROR_NONE); } static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, @@ -683,9 +698,15 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, "Failed to create subchannel", &error, 1)); } else if (GET_CALL(calld) == CANCELLED_CALL) { /* already cancelled before subchannel became ready */ - fail_locked(exec_ctx, calld, - GRPC_ERROR_CREATE_REFERENCING( - "Cancelled before creating subchannel", &error, 1)); + grpc_error *cancellation_error = GRPC_ERROR_CREATE_REFERENCING( + "Cancelled before creating subchannel", &error, 1); + /* if due to deadline, attach the deadline exceeded status to the error */ + if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) { + cancellation_error = + grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS, + GRPC_STATUS_DEADLINE_EXCEEDED); + } + fail_locked(exec_ctx, calld, cancellation_error); } else { /* Create call on subchannel. */ grpc_subchannel_call *subchannel_call = NULL; @@ -739,14 +760,14 @@ static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, if (cpa->connected_subchannel == NULL) { /* cancelled, do nothing */ } else if (error != GRPC_ERROR_NONE) { - grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL); + grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error)); } else { call_data *calld = cpa->elem->call_data; gpr_mu_lock(&calld->mu); if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata, cpa->initial_metadata_flags, cpa->connected_subchannel, cpa->on_ready, GRPC_ERROR_NONE)) { - grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE); } gpr_mu_unlock(&calld->mu); } @@ -778,9 +799,9 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, cpa = closure->cb_arg; if (cpa->connected_subchannel == connected_subchannel) { cpa->connected_subchannel = NULL; - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, cpa->on_ready, - GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL); + GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1)); } } gpr_mu_unlock(&chand->mu); @@ -809,7 +830,6 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY; } } - // TODO(dgq): make this deadline configurable somehow. 
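Most of the mechanical churn in this file (and in the files below) is the closure API change: grpc_closure_init now binds a scheduler (grpc_schedule_on_exec_ctx) at initialization time, and grpc_exec_ctx_sched / grpc_exec_ctx_enqueue_list call sites become grpc_closure_sched / grpc_closure_list_sched without the trailing workqueue argument. A minimal sketch of the new pattern, using a hypothetical callback and state struct rather than anything from this patch:

    typedef struct {
      grpc_closure on_done; /* scheduler is chosen once, at init time */
    } my_state;

    static void on_done_cb(grpc_exec_ctx *exec_ctx, void *arg,
                           grpc_error *error) {
      /* invoked when the exec_ctx drains its scheduled closures */
    }

    static void arm(grpc_exec_ctx *exec_ctx, my_state *st) {
      grpc_closure_init(&st->on_done, on_done_cb, st,
                        grpc_schedule_on_exec_ctx);
      /* Call sites no longer pass a workqueue/NULL argument: */
      grpc_closure_sched(exec_ctx, &st->on_done, GRPC_ERROR_NONE);
    }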
const grpc_lb_policy_pick_args inputs = { initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem, gpr_inf_future(GPR_CLOCK_MONOTONIC)}; @@ -832,12 +852,12 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, cpa->connected_subchannel = connected_subchannel; cpa->on_ready = on_ready; cpa->elem = elem; - grpc_closure_init(&cpa->closure, continue_picking, cpa); + grpc_closure_init(&cpa->closure, continue_picking, cpa, + grpc_schedule_on_exec_ctx); grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure, GRPC_ERROR_NONE); } else { - grpc_exec_ctx_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"), - NULL); + grpc_closure_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected")); } gpr_mu_unlock(&chand->mu); @@ -922,7 +942,8 @@ retry: calld->connected_subchannel == NULL && op->send_initial_metadata != NULL) { calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL; - grpc_closure_init(&calld->next_step, subchannel_ready, elem); + grpc_closure_init(&calld->next_step, subchannel_ready, elem, + grpc_schedule_on_exec_ctx); GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel"); /* If a subchannel is not available immediately, the polling entity from call_data should be provided to channel_data's interested_parties, so @@ -1068,7 +1089,8 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, // get the service config data once the resolver returns. // Take a reference to the call stack to be owned by the callback. GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config"); - grpc_closure_init(&calld->read_service_config, read_service_config, elem); + grpc_closure_init(&calld->read_service_config, read_service_config, elem, + grpc_schedule_on_exec_ctx); grpc_closure_list_append(&chand->waiting_for_config_closures, &calld->read_service_config, GRPC_ERROR_NONE); gpr_mu_unlock(&chand->mu); @@ -1130,30 +1152,6 @@ const grpc_channel_filter grpc_client_channel_filter = { "client-channel", }; -void grpc_client_channel_finish_initialization( - grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack, - grpc_resolver *resolver, - grpc_client_channel_factory *client_channel_factory) { - /* post construction initialization: set the transport setup pointer */ - GPR_ASSERT(client_channel_factory != NULL); - grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack); - channel_data *chand = elem->channel_data; - gpr_mu_lock(&chand->mu); - GPR_ASSERT(!chand->resolver); - chand->resolver = resolver; - GRPC_RESOLVER_REF(resolver, "channel"); - if (!grpc_closure_list_empty(chand->waiting_for_config_closures) || - chand->exit_idle_when_lb_policy_arrives) { - chand->started_resolving = true; - GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver"); - grpc_resolver_next(exec_ctx, resolver, &chand->resolver_result, - &chand->on_resolver_result_changed); - } - chand->client_channel_factory = client_channel_factory; - grpc_client_channel_factory_ref(client_channel_factory); - gpr_mu_unlock(&chand->mu); -} - grpc_connectivity_state grpc_client_channel_check_connectivity_state( grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) { channel_data *chand = elem->channel_data; @@ -1205,7 +1203,8 @@ void grpc_client_channel_watch_connectivity_state( w->pollset = pollset; w->on_complete = on_complete; grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset); - grpc_closure_init(&w->my_closure, on_external_watch_complete, w); + grpc_closure_init(&w->my_closure, 
on_external_watch_complete, w, + grpc_schedule_on_exec_ctx); GRPC_CHANNEL_STACK_REF(w->chand->owning_stack, "external_connectivity_watcher"); gpr_mu_lock(&chand->mu); diff --git a/src/core/ext/client_channel/client_channel.h b/src/core/ext/client_channel/client_channel.h index ab5a84fdfb..f02587d0c1 100644 --- a/src/core/ext/client_channel/client_channel.h +++ b/src/core/ext/client_channel/client_channel.h @@ -38,6 +38,9 @@ #include "src/core/ext/client_channel/resolver.h" #include "src/core/lib/channel/channel_stack.h" +// Channel arg key for server URI string. +#define GRPC_ARG_SERVER_URI "grpc.server_uri" + /* A client channel is a channel that begins disconnected, and can connect to some endpoint on demand. If that endpoint disconnects, it will be connected to again later. @@ -47,13 +50,6 @@ extern const grpc_channel_filter grpc_client_channel_filter; -/* Post-construction initializer to give the client channel its resolver - and factory. */ -void grpc_client_channel_finish_initialization( - grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack, - grpc_resolver *resolver, - grpc_client_channel_factory *client_channel_factory); - grpc_connectivity_state grpc_client_channel_check_connectivity_state( grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect); diff --git a/src/core/ext/client_channel/client_channel_factory.c b/src/core/ext/client_channel/client_channel_factory.c index 4900832d57..4eb35dfcf7 100644 --- a/src/core/ext/client_channel/client_channel_factory.c +++ b/src/core/ext/client_channel/client_channel_factory.c @@ -55,3 +55,35 @@ grpc_channel* grpc_client_channel_factory_create_channel( return factory->vtable->create_client_channel(exec_ctx, factory, target, type, args); } + +static void* factory_arg_copy(void* factory) { + grpc_client_channel_factory_ref(factory); + return factory; +} + +static void factory_arg_destroy(void* factory) { + // TODO(roth): Remove local exec_ctx when + // https://github.com/grpc/grpc/pull/8705 is merged. + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_client_channel_factory_unref(&exec_ctx, factory); + grpc_exec_ctx_finish(&exec_ctx); +} + +static int factory_arg_cmp(void* factory1, void* factory2) { + if (factory1 < factory2) return -1; + if (factory1 > factory2) return 1; + return 0; +} + +static const grpc_arg_pointer_vtable factory_arg_vtable = { + factory_arg_copy, factory_arg_destroy, factory_arg_cmp}; + +grpc_arg grpc_client_channel_factory_create_channel_arg( + grpc_client_channel_factory* factory) { + grpc_arg arg; + arg.type = GRPC_ARG_POINTER; + arg.key = GRPC_ARG_CLIENT_CHANNEL_FACTORY; + arg.value.pointer.p = factory; + arg.value.pointer.vtable = &factory_arg_vtable; + return arg; +} diff --git a/src/core/ext/client_channel/client_channel_factory.h b/src/core/ext/client_channel/client_channel_factory.h index 2b8fc577b3..bf2764b537 100644 --- a/src/core/ext/client_channel/client_channel_factory.h +++ b/src/core/ext/client_channel/client_channel_factory.h @@ -39,6 +39,9 @@ #include "src/core/ext/client_channel/subchannel.h" #include "src/core/lib/channel/channel_stack.h" +// Channel arg key for client channel factory. 
+#define GRPC_ARG_CLIENT_CHANNEL_FACTORY "grpc.client_channel_factory" + typedef struct grpc_client_channel_factory grpc_client_channel_factory; typedef struct grpc_client_channel_factory_vtable grpc_client_channel_factory_vtable; @@ -83,4 +86,7 @@ grpc_channel *grpc_client_channel_factory_create_channel( const char *target, grpc_client_channel_type type, const grpc_channel_args *args); +grpc_arg grpc_client_channel_factory_create_channel_arg( + grpc_client_channel_factory *factory); + #endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */ diff --git a/src/core/ext/client_channel/http_connect_handshaker.c b/src/core/ext/client_channel/http_connect_handshaker.c index 572af52dfd..a0fc95e7bb 100644 --- a/src/core/ext/client_channel/http_connect_handshaker.c +++ b/src/core/ext/client_channel/http_connect_handshaker.c @@ -40,6 +40,8 @@ #include <grpc/support/log.h> #include <grpc/support/string_util.h> +#include "src/core/ext/client_channel/client_channel.h" +#include "src/core/ext/client_channel/resolver_registry.h" #include "src/core/ext/client_channel/uri_parser.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/http/format_request.h" @@ -51,7 +53,6 @@ typedef struct http_connect_handshaker { grpc_handshaker base; char* proxy_server; - char* server_name; gpr_refcount refcount; gpr_mu mu; @@ -86,7 +87,6 @@ static void http_connect_handshaker_unref(grpc_exec_ctx* exec_ctx, gpr_free(handshaker->read_buffer_to_destroy); } gpr_free(handshaker->proxy_server); - gpr_free(handshaker->server_name); grpc_slice_buffer_destroy(&handshaker->write_buffer); grpc_http_parser_destroy(&handshaker->http_parser); grpc_http_response_destroy(&handshaker->http_response); @@ -131,7 +131,7 @@ static void handshake_failed_locked(grpc_exec_ctx* exec_ctx, handshaker->shutdown = true; } // Invoke callback. - grpc_exec_ctx_sched(exec_ctx, handshaker->on_handshake_done, error, NULL); + grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error); } // Callback invoked when finished writing HTTP CONNECT request. @@ -229,7 +229,7 @@ static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg, goto done; } // Success. Invoke handshake-done callback. - grpc_exec_ctx_sched(exec_ctx, handshaker->on_handshake_done, error, NULL); + grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error); done: // Set shutdown to true so that subsequent calls to // http_connect_handshaker_shutdown() do nothing. @@ -265,18 +265,27 @@ static void http_connect_handshaker_do_handshake( grpc_tcp_server_acceptor* acceptor, grpc_closure* on_handshake_done, grpc_handshaker_args* args) { http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in; - gpr_mu_lock(&handshaker->mu); + // Get server name from channel args. + const grpc_arg* arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI); + GPR_ASSERT(arg != NULL); + GPR_ASSERT(arg->type == GRPC_ARG_STRING); + char* canonical_uri = + grpc_resolver_factory_add_default_prefix_if_needed(arg->value.string); + grpc_uri* uri = grpc_uri_parse(canonical_uri, 1); + char* server_name = uri->path; + if (server_name[0] == '/') ++server_name; // Save state in the handshaker object. + gpr_mu_lock(&handshaker->mu); handshaker->args = args; handshaker->on_handshake_done = on_handshake_done; // Send HTTP CONNECT request. 
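Because the HTTP CONNECT handshaker no longer stores server_name, it recovers it at handshake time from the GRPC_ARG_SERVER_URI channel arg, as the hunk above shows. A hedged sketch of that derivation pulled out into a standalone helper (the helper itself is hypothetical; the calls are the ones this patch uses):

    static char *server_name_from_args(const grpc_channel_args *args) {
      const grpc_arg *arg = grpc_channel_args_find(args, GRPC_ARG_SERVER_URI);
      GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_STRING);
      /* Prepend the default resolver prefix if needed, then take the URI
         path (minus any leading '/') as the server name. */
      char *canonical =
          grpc_resolver_factory_add_default_prefix_if_needed(arg->value.string);
      grpc_uri *uri = grpc_uri_parse(canonical, 1);
      const char *path = uri->path[0] == '/' ? uri->path + 1 : uri->path;
      char *server_name = gpr_strdup(path);
      grpc_uri_destroy(uri);
      gpr_free(canonical);
      return server_name;
    }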
- gpr_log(GPR_INFO, "Connecting to server %s via HTTP proxy %s", - handshaker->server_name, handshaker->proxy_server); + gpr_log(GPR_INFO, "Connecting to server %s via HTTP proxy %s", server_name, + handshaker->proxy_server); grpc_httpcli_request request; memset(&request, 0, sizeof(request)); - request.host = handshaker->proxy_server; + request.host = server_name; request.http.method = "CONNECT"; - request.http.path = handshaker->server_name; + request.http.path = server_name; request.handshaker = &grpc_httpcli_plaintext; grpc_slice request_slice = grpc_httpcli_format_connect_request(&request); grpc_slice_buffer_add(&handshaker->write_buffer, request_slice); @@ -285,28 +294,28 @@ static void http_connect_handshaker_do_handshake( grpc_endpoint_write(exec_ctx, args->endpoint, &handshaker->write_buffer, &handshaker->request_done_closure); gpr_mu_unlock(&handshaker->mu); + // Clean up. + gpr_free(canonical_uri); + grpc_uri_destroy(uri); } static const grpc_handshaker_vtable http_connect_handshaker_vtable = { http_connect_handshaker_destroy, http_connect_handshaker_shutdown, http_connect_handshaker_do_handshake}; -grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server, - const char* server_name) { +grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server) { GPR_ASSERT(proxy_server != NULL); - GPR_ASSERT(server_name != NULL); http_connect_handshaker* handshaker = gpr_malloc(sizeof(*handshaker)); memset(handshaker, 0, sizeof(*handshaker)); grpc_handshaker_init(&http_connect_handshaker_vtable, &handshaker->base); gpr_mu_init(&handshaker->mu); gpr_ref_init(&handshaker->refcount, 1); handshaker->proxy_server = gpr_strdup(proxy_server); - handshaker->server_name = gpr_strdup(server_name); grpc_slice_buffer_init(&handshaker->write_buffer); grpc_closure_init(&handshaker->request_done_closure, on_write_done, - handshaker); + handshaker, grpc_schedule_on_exec_ctx); grpc_closure_init(&handshaker->response_read_closure, on_read_done, - handshaker); + handshaker, grpc_schedule_on_exec_ctx); grpc_http_parser_init(&handshaker->http_parser, GRPC_HTTP_RESPONSE, &handshaker->http_response); return &handshaker->base; diff --git a/src/core/ext/client_channel/http_connect_handshaker.h b/src/core/ext/client_channel/http_connect_handshaker.h index c689df2b2b..ea293852e6 100644 --- a/src/core/ext/client_channel/http_connect_handshaker.h +++ b/src/core/ext/client_channel/http_connect_handshaker.h @@ -36,9 +36,8 @@ #include "src/core/lib/channel/handshaker.h" -/// Does NOT take ownership of \a proxy_server or \a server_name. -grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server, - const char* server_name); +/// Does NOT take ownership of \a proxy_server. +grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server); /// Returns the name of the proxy to use, or NULL if no proxy is configured. /// Caller takes ownership of result. diff --git a/src/core/ext/client_channel/lb_policy_factory.h b/src/core/ext/client_channel/lb_policy_factory.h index e2b8080a32..79b3dee259 100644 --- a/src/core/ext/client_channel/lb_policy_factory.h +++ b/src/core/ext/client_channel/lb_policy_factory.h @@ -40,6 +40,9 @@ #include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/resolve_address.h" +// Channel arg key for grpc_lb_addresses. 
+#define GRPC_ARG_LB_ADDRESSES "grpc.lb_addresses" + typedef struct grpc_lb_policy_factory grpc_lb_policy_factory; typedef struct grpc_lb_policy_factory_vtable grpc_lb_policy_factory_vtable; diff --git a/src/core/ext/client_channel/resolver_factory.c b/src/core/ext/client_channel/resolver_factory.c index 7c3d644257..00bbb92dd0 100644 --- a/src/core/ext/client_channel/resolver_factory.c +++ b/src/core/ext/client_channel/resolver_factory.c @@ -43,9 +43,10 @@ void grpc_resolver_factory_unref(grpc_resolver_factory* factory) { /** Create a resolver instance for a name */ grpc_resolver* grpc_resolver_factory_create_resolver( - grpc_resolver_factory* factory, grpc_resolver_args* args) { + grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory, + grpc_resolver_args* args) { if (factory == NULL) return NULL; - return factory->vtable->create_resolver(factory, args); + return factory->vtable->create_resolver(exec_ctx, factory, args); } char* grpc_resolver_factory_get_default_authority( diff --git a/src/core/ext/client_channel/resolver_factory.h b/src/core/ext/client_channel/resolver_factory.h index 4da42e84d2..3792ddca18 100644 --- a/src/core/ext/client_channel/resolver_factory.h +++ b/src/core/ext/client_channel/resolver_factory.h @@ -37,6 +37,7 @@ #include "src/core/ext/client_channel/client_channel_factory.h" #include "src/core/ext/client_channel/resolver.h" #include "src/core/ext/client_channel/uri_parser.h" +#include "src/core/lib/iomgr/pollset_set.h" typedef struct grpc_resolver_factory grpc_resolver_factory; typedef struct grpc_resolver_factory_vtable grpc_resolver_factory_vtable; @@ -48,6 +49,7 @@ struct grpc_resolver_factory { typedef struct grpc_resolver_args { grpc_uri *uri; const grpc_channel_args *args; + grpc_pollset_set *pollset_set; } grpc_resolver_args; struct grpc_resolver_factory_vtable { @@ -55,7 +57,8 @@ struct grpc_resolver_factory_vtable { void (*unref)(grpc_resolver_factory *factory); /** Implementation of grpc_resolver_factory_create_resolver */ - grpc_resolver *(*create_resolver)(grpc_resolver_factory *factory, + grpc_resolver *(*create_resolver)(grpc_exec_ctx *exec_ctx, + grpc_resolver_factory *factory, grpc_resolver_args *args); /** Implementation of grpc_resolver_factory_get_default_authority */ @@ -70,7 +73,8 @@ void grpc_resolver_factory_unref(grpc_resolver_factory *resolver); /** Create a resolver instance for a name */ grpc_resolver *grpc_resolver_factory_create_resolver( - grpc_resolver_factory *factory, grpc_resolver_args *args); + grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory, + grpc_resolver_args *args); /** Return a (freshly allocated with gpr_malloc) string representing the default authority to use for this scheme. 
*/ diff --git a/src/core/ext/client_channel/resolver_registry.c b/src/core/ext/client_channel/resolver_registry.c index d0f0fc3f33..5110a7cad9 100644 --- a/src/core/ext/client_channel/resolver_registry.c +++ b/src/core/ext/client_channel/resolver_registry.c @@ -109,8 +109,8 @@ static grpc_resolver_factory *lookup_factory_by_uri(grpc_uri *uri) { } static grpc_resolver_factory *resolve_factory(const char *target, - grpc_uri **uri) { - char *tmp; + grpc_uri **uri, + char **canonical_target) { grpc_resolver_factory *factory = NULL; GPR_ASSERT(uri != NULL); @@ -118,37 +118,54 @@ static grpc_resolver_factory *resolve_factory(const char *target, factory = lookup_factory_by_uri(*uri); if (factory == NULL) { grpc_uri_destroy(*uri); - gpr_asprintf(&tmp, "%s%s", g_default_resolver_prefix, target); - *uri = grpc_uri_parse(tmp, 1); + gpr_asprintf(canonical_target, "%s%s", g_default_resolver_prefix, target); + *uri = grpc_uri_parse(*canonical_target, 1); factory = lookup_factory_by_uri(*uri); if (factory == NULL) { grpc_uri_destroy(grpc_uri_parse(target, 0)); - grpc_uri_destroy(grpc_uri_parse(tmp, 0)); - gpr_log(GPR_ERROR, "don't know how to resolve '%s' or '%s'", target, tmp); + grpc_uri_destroy(grpc_uri_parse(*canonical_target, 0)); + gpr_log(GPR_ERROR, "don't know how to resolve '%s' or '%s'", target, + *canonical_target); } - gpr_free(tmp); } return factory; } -grpc_resolver *grpc_resolver_create(const char *target, - const grpc_channel_args *args) { +grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target, + const grpc_channel_args *args, + grpc_pollset_set *pollset_set) { grpc_uri *uri = NULL; - grpc_resolver_factory *factory = resolve_factory(target, &uri); + char *canonical_target = NULL; + grpc_resolver_factory *factory = + resolve_factory(target, &uri, &canonical_target); grpc_resolver *resolver; grpc_resolver_args resolver_args; memset(&resolver_args, 0, sizeof(resolver_args)); resolver_args.uri = uri; resolver_args.args = args; - resolver = grpc_resolver_factory_create_resolver(factory, &resolver_args); + resolver_args.pollset_set = pollset_set; + resolver = + grpc_resolver_factory_create_resolver(exec_ctx, factory, &resolver_args); grpc_uri_destroy(uri); + gpr_free(canonical_target); return resolver; } char *grpc_get_default_authority(const char *target) { grpc_uri *uri = NULL; - grpc_resolver_factory *factory = resolve_factory(target, &uri); + char *canonical_target = NULL; + grpc_resolver_factory *factory = + resolve_factory(target, &uri, &canonical_target); char *authority = grpc_resolver_factory_get_default_authority(factory, uri); grpc_uri_destroy(uri); + gpr_free(canonical_target); return authority; } + +char *grpc_resolver_factory_add_default_prefix_if_needed(const char *target) { + grpc_uri *uri = NULL; + char *canonical_target = NULL; + resolve_factory(target, &uri, &canonical_target); + grpc_uri_destroy(uri); + return canonical_target == NULL ? 
gpr_strdup(target) : canonical_target; +} diff --git a/src/core/ext/client_channel/resolver_registry.h b/src/core/ext/client_channel/resolver_registry.h index 2a95a669f0..4fb16131db 100644 --- a/src/core/ext/client_channel/resolver_registry.h +++ b/src/core/ext/client_channel/resolver_registry.h @@ -35,6 +35,7 @@ #define GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_REGISTRY_H #include "src/core/ext/client_channel/resolver_factory.h" +#include "src/core/lib/iomgr/pollset_set.h" void grpc_resolver_registry_init(); void grpc_resolver_registry_shutdown(void); @@ -60,8 +61,9 @@ void grpc_register_resolver_type(grpc_resolver_factory *factory); If a resolver factory was not found, return NULL. \a args is a set of channel arguments to be included in the result (typically the set of arguments passed in from the client API). */ -grpc_resolver *grpc_resolver_create(const char *target, - const grpc_channel_args *args); +grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target, + const grpc_channel_args *args, + grpc_pollset_set *pollset_set); /** Find a resolver factory given a name and return an (owned-by-the-caller) * reference to it */ @@ -71,4 +73,8 @@ grpc_resolver_factory *grpc_resolver_factory_lookup(const char *name); representing the default authority to pass from a client. */ char *grpc_get_default_authority(const char *target); +/** Returns a newly allocated string containing \a target, adding the + default prefix if needed. */ +char *grpc_resolver_factory_add_default_prefix_if_needed(const char *target); + #endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_REGISTRY_H */ diff --git a/src/core/ext/client_channel/subchannel.c b/src/core/ext/client_channel/subchannel.c index 08632079d6..87f0ef298a 100644 --- a/src/core/ext/client_channel/subchannel.c +++ b/src/core/ext/client_channel/subchannel.c @@ -293,8 +293,9 @@ void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx, gpr_atm old_refs; old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF")); if (old_refs == 1) { - grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(subchannel_destroy, c), - GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, grpc_closure_create(subchannel_destroy, c, + grpc_schedule_on_exec_ctx), + GRPC_ERROR_NONE); } } @@ -330,7 +331,8 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx, c->args = grpc_channel_args_copy(args->args); c->root_external_state_watcher.next = c->root_external_state_watcher.prev = &c->root_external_state_watcher; - grpc_closure_init(&c->connected, subchannel_connected, c); + grpc_closure_init(&c->connected, subchannel_connected, c, + grpc_schedule_on_exec_ctx); grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE, "subchannel"); int initial_backoff_ms = @@ -505,7 +507,8 @@ void grpc_subchannel_notify_on_state_change( w->subchannel = c; w->pollset_set = interested_parties; w->notify = notify; - grpc_closure_init(&w->closure, on_external_state_watcher_done, w); + grpc_closure_init(&w->closure, on_external_state_watcher_done, w, + grpc_schedule_on_exec_ctx); if (interested_parties != NULL) { grpc_pollset_set_add_pollset_set(exec_ctx, c->pollset_set, interested_parties); @@ -604,14 +607,20 @@ static void publish_transport_locked(grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder_set_transport(builder, c->connecting_result.transport); - if (grpc_channel_init_create_stack(exec_ctx, builder, - GRPC_CLIENT_SUBCHANNEL)) { - con = grpc_channel_stack_builder_finish(exec_ctx, builder, 0, 1, - connection_destroy, NULL); - } else { + if 
(!grpc_channel_init_create_stack(exec_ctx, builder, + GRPC_CLIENT_SUBCHANNEL)) { grpc_channel_stack_builder_destroy(builder); abort(); /* TODO(ctiller): what to do here (previously we just crashed) */ } + grpc_error *error = grpc_channel_stack_builder_finish( + exec_ctx, builder, 0, 1, connection_destroy, NULL, (void **)&con); + if (error != GRPC_ERROR_NONE) { + const char *msg = grpc_error_string(error); + gpr_log(GPR_ERROR, "error initializing subchannel stack: %s", msg); + grpc_error_free_string(msg); + GRPC_ERROR_UNREF(error); + abort(); /* TODO(ctiller): what to do here? */ + } stk = CHANNEL_STACK_FROM_CONNECTION(con); memset(&c->connecting_result, 0, sizeof(c->connecting_result)); @@ -620,7 +629,7 @@ static void publish_transport_locked(grpc_exec_ctx *exec_ctx, sw_subchannel->subchannel = c; sw_subchannel->connectivity_state = GRPC_CHANNEL_READY; grpc_closure_init(&sw_subchannel->closure, subchannel_on_child_state_changed, - sw_subchannel); + sw_subchannel, grpc_schedule_on_exec_ctx); if (c->disconnected) { gpr_free(sw_subchannel); diff --git a/src/core/ext/client_channel/subchannel.h b/src/core/ext/client_channel/subchannel.h index 10bae620df..24aa9f73dc 100644 --- a/src/core/ext/client_channel/subchannel.h +++ b/src/core/ext/client_channel/subchannel.h @@ -164,8 +164,6 @@ struct grpc_subchannel_args { size_t filter_count; /** Channel arguments to be supplied to the newly created channel */ const grpc_channel_args *args; - /** Server name */ - const char *server_name; /** Address to connect to */ grpc_resolved_address *addr; }; diff --git a/src/core/ext/client_channel/subchannel_index.c b/src/core/ext/client_channel/subchannel_index.c index 227013a7d7..a1ba5e945c 100644 --- a/src/core/ext/client_channel/subchannel_index.c +++ b/src/core/ext/client_channel/subchannel_index.c @@ -86,7 +86,6 @@ static grpc_subchannel_key *create_key( } else { k->args.filters = NULL; } - k->args.server_name = gpr_strdup(args->server_name); k->args.addr = gpr_malloc(sizeof(grpc_resolved_address)); k->args.addr->len = args->addr->len; if (k->args.addr->len > 0) { @@ -113,8 +112,6 @@ static int subchannel_key_compare(grpc_subchannel_key *a, if (c != 0) return c; c = GPR_ICMP(a->args.filter_count, b->args.filter_count); if (c != 0) return c; - c = strcmp(a->args.server_name, b->args.server_name); - if (c != 0) return c; if (a->args.addr->len) { c = memcmp(a->args.addr->addr, b->args.addr->addr, a->args.addr->len); if (c != 0) return c; @@ -132,7 +129,6 @@ void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx, grpc_connector_unref(exec_ctx, k->connector); gpr_free((grpc_channel_args *)k->args.filters); grpc_channel_args_destroy((grpc_channel_args *)k->args.args); - gpr_free((void *)k->args.server_name); gpr_free(k->args.addr); gpr_free(k); } diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c index df0db61c22..bb0ee9d95c 100644 --- a/src/core/ext/lb_policy/grpclb/grpclb.c +++ b/src/core/ext/lb_policy/grpclb/grpclb.c @@ -106,6 +106,7 @@ #include <grpc/support/string_util.h> #include <grpc/support/time.h> +#include "src/core/ext/client_channel/client_channel.h" #include "src/core/ext/client_channel/client_channel_factory.h" #include "src/core/ext/client_channel/lb_policy_factory.h" #include "src/core/ext/client_channel/lb_policy_registry.h" @@ -179,8 +180,7 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg, wrapped_rr_closure_arg *wc_arg = arg; GPR_ASSERT(wc_arg->wrapped_closure != NULL); - grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, 
GRPC_ERROR_REF(error), - NULL); + grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error)); if (wc_arg->rr_policy != NULL) { /* if *target is NULL, no pick has been made by the RR policy (eg, all @@ -247,7 +247,8 @@ static void add_pending_pick(pending_pick **root, pick_args->lb_token_mdelem_storage; pp->wrapped_on_complete_arg.free_when_done = pp; grpc_closure_init(&pp->wrapped_on_complete_arg.wrapper_closure, - wrapped_rr_closure, &pp->wrapped_on_complete_arg); + wrapped_rr_closure, &pp->wrapped_on_complete_arg, + grpc_schedule_on_exec_ctx); *root = pp; } @@ -267,7 +268,8 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) { pping->wrapped_notify_arg.free_when_done = pping; pping->next = *root; grpc_closure_init(&pping->wrapped_notify_arg.wrapper_closure, - wrapped_rr_closure, &pping->wrapped_notify_arg); + wrapped_rr_closure, &pping->wrapped_notify_arg, + grpc_schedule_on_exec_ctx); *root = pping; } @@ -666,7 +668,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx, gpr_malloc(sizeof(rr_connectivity_data)); memset(rr_connectivity, 0, sizeof(rr_connectivity_data)); grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed, - rr_connectivity); + rr_connectivity, grpc_schedule_on_exec_ctx); rr_connectivity->glb_policy = glb_policy; rr_connectivity->state = new_rr_state; @@ -743,12 +745,6 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg, static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, grpc_lb_policy_factory *factory, grpc_lb_policy_args *args) { - /* Get server name. */ - const grpc_arg *arg = - grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME); - const char *server_name = - arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL; - /* Count the number of gRPC-LB addresses. There must be at least one. * TODO(roth): For now, we ignore non-balancer addresses, but in the * future, we may change the behavior such that we fall back to using @@ -756,7 +752,8 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, * time, this should be changed to allow a list with no balancer addresses, * since the resolver might fail to return a balancer address even when * this is the right LB policy to use. */ - arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); + const grpc_arg *arg = + grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER); grpc_lb_addresses *addresses = arg->value.pointer.p; size_t num_grpclb_addrs = 0; @@ -768,13 +765,25 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy = gpr_malloc(sizeof(*glb_policy)); memset(glb_policy, 0, sizeof(*glb_policy)); + /* Get server name. */ + arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI); + GPR_ASSERT(arg != NULL); + GPR_ASSERT(arg->type == GRPC_ARG_STRING); + grpc_uri *uri = grpc_uri_parse(arg->value.string, true); + GPR_ASSERT(uri->path[0] != '\0'); + glb_policy->server_name = + gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path); + if (grpc_lb_glb_trace) { + gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.", + glb_policy->server_name); + } + grpc_uri_destroy(uri); + /* All input addresses in addresses come from a resolver that claims * they are LB services. It's the resolver's responsibility to make sure - * this - * policy is only instantiated and used in that case. + * this policy is only instantiated and used in that case. 
* * Create a client channel over them to communicate with a LB service */ - glb_policy->server_name = gpr_strdup(server_name); glb_policy->cc_factory = args->client_channel_factory; glb_policy->args = grpc_channel_args_copy(args->args); GPR_ASSERT(glb_policy->cc_factory != NULL); @@ -818,9 +827,14 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, * We need the LB channel to return addresses with is_balancer=false * so that it does not wind up recursively using the grpclb LB policy, * as per the special case logic in client_channel.c. + * + * Finally, we also strip out the channel arg for the server URI, + * since that will be different for the LB channel than for the parent + * channel. (The client channel factory will re-add this arg with + * the right value.) */ - static const char *keys_to_remove[] = {GRPC_ARG_LB_POLICY_NAME, - GRPC_ARG_LB_ADDRESSES}; + static const char *keys_to_remove[] = { + GRPC_ARG_LB_POLICY_NAME, GRPC_ARG_LB_ADDRESSES, GRPC_ARG_SERVER_URI}; grpc_channel_args *new_args = grpc_channel_args_copy_and_remove( args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove)); glb_policy->lb_channel = grpc_client_channel_factory_create_channel( @@ -895,15 +909,15 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { while (pp != NULL) { pending_pick *next = pp->next; *pp->target = NULL; - grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure, - GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure, + GRPC_ERROR_NONE); pp = next; } while (pping != NULL) { pending_ping *next = pping->next; - grpc_exec_ctx_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure, - GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure, + GRPC_ERROR_NONE); pping = next; } } @@ -919,9 +933,9 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, pending_pick *next = pp->next; if (pp->target == target) { *target = NULL; - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure, - GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL); + GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1)); } else { pp->next = glb_policy->pending_picks; glb_policy->pending_picks = pp; @@ -944,9 +958,9 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, pending_pick *next = pp->next; if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) == initial_metadata_flags_eq) { - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure, - GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL); + GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1)); } else { pp->next = glb_policy->pending_picks; glb_policy->pending_picks = pp; @@ -981,11 +995,10 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_closure *on_complete) { if (pick_args->lb_token_mdelem_storage == NULL) { *target = NULL; - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, on_complete, GRPC_ERROR_CREATE("No mdelem storage for the LB token. Load reporting " - "won't work without it. Failing"), - NULL); + "won't work without it. 
Failing")); return 0; } @@ -1004,7 +1017,8 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, wrapped_rr_closure_arg *wc_arg = gpr_malloc(sizeof(wrapped_rr_closure_arg)); memset(wc_arg, 0, sizeof(wrapped_rr_closure_arg)); - grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg); + grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg, + grpc_schedule_on_exec_ctx); wc_arg->rr_policy = glb_policy->rr_policy; wc_arg->target = target; wc_arg->wrapped_closure = on_complete; @@ -1104,9 +1118,11 @@ static void lb_call_init_locked(glb_lb_policy *glb_policy) { glb_policy->lb_call_status_details_capacity = 0; grpc_closure_init(&glb_policy->lb_on_server_status_received, - lb_on_server_status_received, glb_policy); + lb_on_server_status_received, glb_policy, + grpc_schedule_on_exec_ctx); grpc_closure_init(&glb_policy->lb_on_response_received, - lb_on_response_received, glb_policy); + lb_on_response_received, glb_policy, + grpc_schedule_on_exec_ctx); gpr_backoff_init(&glb_policy->lb_call_backoff_state, GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS, diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c index afecb716fb..e352e0396f 100644 --- a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c @@ -1,35 +1,3 @@ -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ /* Automatically generated nanopb constant definitions */ /* Generated by nanopb-0.3.7-dev */ diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h index e36d0966f8..725aa7e386 100644 --- a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h @@ -1,35 +1,3 @@ -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ /* Automatically generated nanopb header */ /* Generated by nanopb-0.3.7-dev */ diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c index c69f773e78..821becff69 100644 --- a/src/core/ext/lb_policy/pick_first/pick_first.c +++ b/src/core/ext/lb_policy/pick_first/pick_first.c @@ -120,7 +120,7 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { while (pp != NULL) { pending_pick *next = pp->next; *pp->target = NULL; - grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE); gpr_free(pp); pp = next; } @@ -138,9 +138,9 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, pending_pick *next = pp->next; if (pp->target == target) { *target = NULL; - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, pp->on_complete, - GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL); + GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1)); gpr_free(pp); } else { pp->next = p->pending_picks; @@ -165,9 +165,9 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, pending_pick *next = pp->next; if ((pp->initial_metadata_flags & initial_metadata_flags_mask) == initial_metadata_flags_eq) { - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, pp->on_complete, - GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL); + GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1)); gpr_free(pp); } else { pp->next = p->pending_picks; @@ -306,14 +306,15 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg, /* drop the pick list: we are connected now */ GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels"); gpr_atm_rel_store(&p->selected, (gpr_atm)selected); - grpc_exec_ctx_sched(exec_ctx, - grpc_closure_create(destroy_subchannels, p), - GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, + grpc_closure_create(destroy_subchannels, p, + grpc_schedule_on_exec_ctx), + 
GRPC_ERROR_NONE); /* update any calls that were waiting for a pick */ while ((pp = p->pending_picks)) { p->pending_picks = pp->next; *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked"); - grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE); gpr_free(pp); } grpc_connected_subchannel_notify_on_state_change( @@ -366,8 +367,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg, while ((pp = p->pending_picks)) { p->pending_picks = pp->next; *pp->target = NULL; - grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, - NULL); + grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE); gpr_free(pp); } GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, @@ -419,8 +419,7 @@ static void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, if (selected) { grpc_connected_subchannel_ping(exec_ctx, selected, closure); } else { - grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"), - NULL); + grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected")); } } @@ -438,15 +437,10 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx, grpc_lb_policy_args *args) { GPR_ASSERT(args->client_channel_factory != NULL); - /* Get server name. */ - const grpc_arg *arg = - grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME); - const char *server_name = - arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL; - /* Find the number of backend addresses. We ignore balancer * addresses, since we don't know how to handle them. */ - arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); + const grpc_arg *arg = + grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER); grpc_lb_addresses *addresses = arg->value.pointer.p; size_t num_addrs = 0; @@ -472,9 +466,6 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx, } memset(&sc_args, 0, sizeof(grpc_subchannel_args)); - /* server_name will be copied as part of the subchannel creation. This makes - * the copying of server_name (a borrowed pointer) OK. 
*/ - sc_args.server_name = server_name; sc_args.addr = &addresses->addresses[i].address; sc_args.args = args->args; @@ -493,7 +484,8 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx, p->num_subchannels = subchannel_idx; grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable); - grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p); + grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p, + grpc_schedule_on_exec_ctx); gpr_mu_init(&p->mu); return &p->base; } diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c index 59f84054c4..47f20a1904 100644 --- a/src/core/ext/lb_policy/round_robin/round_robin.c +++ b/src/core/ext/lb_policy/round_robin/round_robin.c @@ -321,8 +321,8 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { while ((pp = p->pending_picks)) { p->pending_picks = pp->next; *pp->target = NULL; - grpc_exec_ctx_sched(exec_ctx, pp->on_complete, - GRPC_ERROR_CREATE("Channel Shutdown"), NULL); + grpc_closure_sched(exec_ctx, pp->on_complete, + GRPC_ERROR_CREATE("Channel Shutdown")); gpr_free(pp); } grpc_connectivity_state_set( @@ -348,9 +348,9 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, pending_pick *next = pp->next; if (pp->target == target) { *target = NULL; - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, pp->on_complete, - GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL); + GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1)); gpr_free(pp); } else { pp->next = p->pending_picks; @@ -376,9 +376,9 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, if ((pp->initial_metadata_flags & initial_metadata_flags_mask) == initial_metadata_flags_eq) { *pp->target = NULL; - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, pp->on_complete, - GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL); + GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1)); gpr_free(pp); } else { pp->next = p->pending_picks; @@ -581,7 +581,7 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg, "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)", (void *)selected->subchannel, (void *)selected); } - grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE); gpr_free(pp); } update_lb_connectivity_status(exec_ctx, sd, error); @@ -634,7 +634,7 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg, while ((pp = p->pending_picks)) { p->pending_picks = pp->next; *pp->target = NULL; - grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE); gpr_free(pp); } } @@ -684,8 +684,8 @@ static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked"); } else { gpr_mu_unlock(&p->mu); - grpc_exec_ctx_sched(exec_ctx, closure, - GRPC_ERROR_CREATE("Round Robin not connected"), NULL); + grpc_closure_sched(exec_ctx, closure, + GRPC_ERROR_CREATE("Round Robin not connected")); } } @@ -703,15 +703,10 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx, grpc_lb_policy_args *args) { GPR_ASSERT(args->client_channel_factory != NULL); - /* Get server name. */ - const grpc_arg *arg = - grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME); - const char *server_name = - arg != NULL && arg->type == GRPC_ARG_STRING ? 
arg->value.string : NULL; - /* Find the number of backend addresses. We ignore balancer * addresses, since we don't know how to handle them. */ - arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); + const grpc_arg *arg = + grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER); grpc_lb_addresses *addresses = arg->value.pointer.p; size_t num_addrs = 0; @@ -734,9 +729,6 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx, if (addresses->addresses[i].is_balancer) continue; memset(&sc_args, 0, sizeof(grpc_subchannel_args)); - /* server_name will be copied as part of the subchannel creation. This makes - * the copying of server_name (a borrowed pointer) OK. */ - sc_args.server_name = server_name; sc_args.addr = &addresses->addresses[i].address; sc_args.args = args->args; @@ -757,7 +749,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx, } ++subchannel_idx; grpc_closure_init(&sd->connectivity_changed_closure, - rr_connectivity_changed, sd); + rr_connectivity_changed, sd, grpc_schedule_on_exec_ctx); } } if (subchannel_idx == 0) { diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c index b810e20bb9..1403eb0d33 100644 --- a/src/core/ext/load_reporting/load_reporting_filter.c +++ b/src/core/ext/load_reporting/load_reporting_filter.c @@ -114,7 +114,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, memset(calld, 0, sizeof(call_data)); calld->id = (intptr_t)args->call_stack; - grpc_closure_init(&calld->on_initial_md_ready, on_initial_md_ready, elem); + grpc_closure_init(&calld->on_initial_md_ready, on_initial_md_ready, elem, + grpc_schedule_on_exec_ctx); /* TODO(dgq): do something with the data channel_data *chand = elem->channel_data; @@ -152,9 +153,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, } /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { GPR_ASSERT(!args->is_last); channel_data *chand = elem->channel_data; @@ -171,6 +172,8 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx, NULL, NULL}; */ + + return GRPC_ERROR_NONE; } /* Destructor for channel data */ diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c index 15476f5792..efbb2be4e1 100644 --- a/src/core/ext/resolver/dns/native/dns_resolver.c +++ b/src/core/ext/resolver/dns/native/dns_resolver.c @@ -61,6 +61,8 @@ typedef struct { char *default_port; /** channel args. 
*/ grpc_channel_args *channel_args; + /** pollset_set to drive the name resolution process */ + grpc_pollset_set *interested_parties; /** mutex guarding the rest of the state */ gpr_mu mu; @@ -110,8 +112,8 @@ static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) { } if (r->next_completion != NULL) { *r->target_result = NULL; - grpc_exec_ctx_sched(exec_ctx, r->next_completion, - GRPC_ERROR_CREATE("Resolver Shutdown"), NULL); + grpc_closure_sched(exec_ctx, r->next_completion, + GRPC_ERROR_CREATE("Resolver Shutdown")); r->next_completion = NULL; } gpr_mu_unlock(&r->mu); @@ -217,8 +219,10 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx, GPR_ASSERT(!r->resolving); r->resolving = true; r->addresses = NULL; - grpc_resolve_address(exec_ctx, r->name_to_resolve, r->default_port, - grpc_closure_create(dns_on_resolved, r), &r->addresses); + grpc_resolve_address( + exec_ctx, r->name_to_resolve, r->default_port, r->interested_parties, + grpc_closure_create(dns_on_resolved, r, grpc_schedule_on_exec_ctx), + &r->addresses); } static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx, @@ -228,7 +232,7 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx, *r->target_result = r->resolved_result == NULL ? NULL : grpc_channel_args_copy(r->resolved_result); - grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE); r->next_completion = NULL; r->published_version = r->resolved_version; } @@ -240,13 +244,15 @@ static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) { if (r->resolved_result != NULL) { grpc_channel_args_destroy(r->resolved_result); } + grpc_pollset_set_destroy(r->interested_parties); gpr_free(r->name_to_resolve); gpr_free(r->default_port); grpc_channel_args_destroy(r->channel_args); gpr_free(r); } -static grpc_resolver *dns_create(grpc_resolver_args *args, +static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx, + grpc_resolver_args *args, const char *default_port) { if (0 != strcmp(args->uri->authority, "")) { gpr_log(GPR_ERROR, "authority based dns uri's not supported"); @@ -264,12 +270,12 @@ static grpc_resolver *dns_create(grpc_resolver_args *args, grpc_resolver_init(&r->base, &dns_resolver_vtable); r->name_to_resolve = proxy_name == NULL ? 
gpr_strdup(path) : proxy_name; r->default_port = gpr_strdup(default_port); - grpc_arg server_name_arg; - server_name_arg.type = GRPC_ARG_STRING; - server_name_arg.key = GRPC_ARG_SERVER_NAME; - server_name_arg.value.string = (char *)path; - r->channel_args = - grpc_channel_args_copy_and_add(args->args, &server_name_arg, 1); + r->channel_args = grpc_channel_args_copy(args->args); + r->interested_parties = grpc_pollset_set_create(); + if (args->pollset_set != NULL) { + grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties, + args->pollset_set); + } gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS, GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER, GRPC_DNS_RECONNECT_JITTER, @@ -287,8 +293,9 @@ static void dns_factory_ref(grpc_resolver_factory *factory) {} static void dns_factory_unref(grpc_resolver_factory *factory) {} static grpc_resolver *dns_factory_create_resolver( - grpc_resolver_factory *factory, grpc_resolver_args *args) { - return dns_create(args, "https"); + grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory, + grpc_resolver_args *args) { + return dns_create(exec_ctx, args, "https"); } static char *dns_factory_get_default_host_name(grpc_resolver_factory *factory, diff --git a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c index 26a650aadd..55ea502c9e 100644 --- a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c +++ b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c @@ -89,7 +89,7 @@ static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx, gpr_mu_lock(&r->mu); if (r->next_completion != NULL) { *r->target_result = NULL; - grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE); r->next_completion = NULL; } gpr_mu_unlock(&r->mu); @@ -123,7 +123,7 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx, grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses); *r->target_result = grpc_channel_args_copy_and_add(r->channel_args, &arg, 1); - grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE); r->next_completion = NULL; } } @@ -198,12 +198,7 @@ static grpc_resolver *sockaddr_create(grpc_resolver_args *args, sockaddr_resolver *r = gpr_malloc(sizeof(sockaddr_resolver)); memset(r, 0, sizeof(*r)); r->addresses = addresses; - grpc_arg server_name_arg; - server_name_arg.type = GRPC_ARG_STRING; - server_name_arg.key = GRPC_ARG_SERVER_NAME; - server_name_arg.value.string = args->uri->path; - r->channel_args = - grpc_channel_args_copy_and_add(args->args, &server_name_arg, 1); + r->channel_args = grpc_channel_args_copy(args->args); gpr_mu_init(&r->mu); grpc_resolver_init(&r->base, &sockaddr_resolver_vtable); return &r->base; @@ -219,7 +214,8 @@ static void sockaddr_factory_unref(grpc_resolver_factory *factory) {} #define DECL_FACTORY(name) \ static grpc_resolver *name##_factory_create_resolver( \ - grpc_resolver_factory *factory, grpc_resolver_args *args) { \ + grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory, \ + grpc_resolver_args *args) { \ return sockaddr_create(args, parse_##name); \ } \ static const grpc_resolver_factory_vtable name##_factory_vtable = { \ diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.c b/src/core/ext/transport/chttp2/client/chttp2_connector.c index 213395c20f..c807342ebc 100644 --- a/src/core/ext/transport/chttp2/client/chttp2_connector.c +++ 
b/src/core/ext/transport/chttp2/client/chttp2_connector.c @@ -56,10 +56,10 @@ typedef struct { gpr_refcount refs; bool shutdown; + bool connecting; - char *server_name; - grpc_chttp2_create_handshakers_func create_handshakers; - void *create_handshakers_user_data; + grpc_chttp2_add_handshakers_func add_handshakers; + void *add_handshakers_user_data; grpc_closure *notify; grpc_connect_in_args args; @@ -88,7 +88,6 @@ static void chttp2_connector_unref(grpc_exec_ctx *exec_ctx, // If handshaking is not yet in progress, destroy the endpoint. // Otherwise, the handshaker will do this for us. if (c->endpoint != NULL) grpc_endpoint_destroy(exec_ctx, c->endpoint); - gpr_free(c->server_name); gpr_free(c); } } @@ -103,7 +102,9 @@ static void chttp2_connector_shutdown(grpc_exec_ctx *exec_ctx, } // If handshaking is not yet in progress, shutdown the endpoint. // Otherwise, the handshaker will do this for us. - if (c->endpoint != NULL) grpc_endpoint_shutdown(exec_ctx, c->endpoint); + if (!c->connecting && c->endpoint != NULL) { + grpc_endpoint_shutdown(exec_ctx, c->endpoint); + } gpr_mu_unlock(&c->mu); } @@ -140,7 +141,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, } grpc_closure *notify = c->notify; c->notify = NULL; - grpc_exec_ctx_sched(exec_ctx, notify, error, NULL); + grpc_closure_sched(exec_ctx, notify, error); grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr); c->handshake_mgr = NULL; gpr_mu_unlock(&c->mu); @@ -152,14 +153,13 @@ static void start_handshake_locked(grpc_exec_ctx *exec_ctx, c->handshake_mgr = grpc_handshake_manager_create(); char *proxy_name = grpc_get_http_proxy_server(); if (proxy_name != NULL) { - grpc_handshake_manager_add( - c->handshake_mgr, - grpc_http_connect_handshaker_create(proxy_name, c->server_name)); + grpc_handshake_manager_add(c->handshake_mgr, + grpc_http_connect_handshaker_create(proxy_name)); gpr_free(proxy_name); } - if (c->create_handshakers != NULL) { - c->create_handshakers(exec_ctx, c->create_handshakers_user_data, - c->handshake_mgr); + if (c->add_handshakers != NULL) { + c->add_handshakers(exec_ctx, c->add_handshakers_user_data, + c->handshake_mgr); } grpc_handshake_manager_do_handshake( exec_ctx, c->handshake_mgr, c->endpoint, c->args.channel_args, @@ -180,7 +180,7 @@ static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg, memset(c->result, 0, sizeof(*c->result)); grpc_closure *notify = c->notify; c->notify = NULL; - grpc_exec_ctx_sched(exec_ctx, notify, error, NULL); + grpc_closure_sched(exec_ctx, notify, error); gpr_mu_unlock(&c->mu); chttp2_connector_unref(exec_ctx, arg); } else { @@ -192,6 +192,8 @@ static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg, static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { chttp2_connector *c = arg; gpr_mu_lock(&c->mu); + GPR_ASSERT(c->connecting); + c->connecting = false; if (error != GRPC_ERROR_NONE || c->shutdown) { if (error == GRPC_ERROR_NONE) { error = GRPC_ERROR_CREATE("connector shutdown"); @@ -201,14 +203,15 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { memset(c->result, 0, sizeof(*c->result)); grpc_closure *notify = c->notify; c->notify = NULL; - grpc_exec_ctx_sched(exec_ctx, notify, error, NULL); + grpc_closure_sched(exec_ctx, notify, error); + if (c->endpoint != NULL) grpc_endpoint_shutdown(exec_ctx, c->endpoint); gpr_mu_unlock(&c->mu); chttp2_connector_unref(exec_ctx, arg); } else { GPR_ASSERT(c->endpoint != NULL); if 
(!GRPC_SLICE_IS_EMPTY(c->args.initial_connect_string)) { grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent, - c); + c, grpc_schedule_on_exec_ctx); grpc_slice_buffer_init(&c->initial_string_buffer); grpc_slice_buffer_add(&c->initial_string_buffer, c->args.initial_connect_string); @@ -234,7 +237,9 @@ static void chttp2_connector_connect(grpc_exec_ctx *exec_ctx, c->result = result; GPR_ASSERT(c->endpoint == NULL); chttp2_connector_ref(con); // Ref taken for callback. - grpc_closure_init(&c->connected, connected, c); + grpc_closure_init(&c->connected, connected, c, grpc_schedule_on_exec_ctx); + GPR_ASSERT(!c->connecting); + c->connecting = true; grpc_tcp_client_connect(exec_ctx, &c->connected, &c->endpoint, args->interested_parties, args->channel_args, args->addr, args->deadline); @@ -246,16 +251,14 @@ static const grpc_connector_vtable chttp2_connector_vtable = { chttp2_connector_connect}; grpc_connector *grpc_chttp2_connector_create( - grpc_exec_ctx *exec_ctx, const char *server_name, - grpc_chttp2_create_handshakers_func create_handshakers, - void *create_handshakers_user_data) { + grpc_exec_ctx *exec_ctx, grpc_chttp2_add_handshakers_func add_handshakers, + void *add_handshakers_user_data) { chttp2_connector *c = gpr_malloc(sizeof(*c)); memset(c, 0, sizeof(*c)); c->base.vtable = &chttp2_connector_vtable; gpr_mu_init(&c->mu); gpr_ref_init(&c->refs, 1); - c->server_name = gpr_strdup(server_name); - c->create_handshakers = create_handshakers; - c->create_handshakers_user_data = create_handshakers_user_data; + c->add_handshakers = add_handshakers; + c->add_handshakers_user_data = add_handshakers_user_data; return &c->base; } diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.h b/src/core/ext/transport/chttp2/client/chttp2_connector.h index 6c34ce1af1..58eba22417 100644 --- a/src/core/ext/transport/chttp2/client/chttp2_connector.h +++ b/src/core/ext/transport/chttp2/client/chttp2_connector.h @@ -38,15 +38,14 @@ #include "src/core/lib/channel/handshaker.h" #include "src/core/lib/iomgr/exec_ctx.h" -typedef void (*grpc_chttp2_create_handshakers_func)( +typedef void (*grpc_chttp2_add_handshakers_func)( grpc_exec_ctx* exec_ctx, void* user_data, grpc_handshake_manager* handshake_mgr); -/// If \a create_handshakers is non-NULL, it will be called with -/// \a create_handshakers_user_data to add handshakers. +/// If \a add_handshakers is non-NULL, it will be called with +/// \a add_handshakers_user_data to add handshakers. 
grpc_connector* grpc_chttp2_connector_create( - grpc_exec_ctx* exec_ctx, const char* server_name, - grpc_chttp2_create_handshakers_func create_handshakers, - void* create_handshakers_user_data); + grpc_exec_ctx* exec_ctx, grpc_chttp2_add_handshakers_func add_handshakers, + void* add_handshakers_user_data); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H */ diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.c b/src/core/ext/transport/chttp2/client/insecure/channel_create.c index 29f3759d00..a0d0652ce7 100644 --- a/src/core/ext/transport/chttp2/client/insecure/channel_create.c +++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.c @@ -39,8 +39,8 @@ #include <grpc/support/string_util.h> #include "src/core/ext/client_channel/client_channel.h" -#include "src/core/ext/client_channel/resolver_registry.h" #include "src/core/ext/transport/chttp2/client/chttp2_connector.h" +#include "src/core/lib/channel/channel_args.h" #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/channel.h" @@ -54,8 +54,7 @@ static grpc_subchannel *client_channel_factory_create_subchannel( grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory, const grpc_subchannel_args *args) { grpc_connector *connector = grpc_chttp2_connector_create( - exec_ctx, args->server_name, NULL /* create_handshakers */, - NULL /* user_data */); + exec_ctx, NULL /* add_handshakers */, NULL /* user_data */); grpc_subchannel *s = grpc_subchannel_create(exec_ctx, connector, args); grpc_connector_unref(exec_ctx, connector); return s; @@ -65,17 +64,15 @@ static grpc_channel *client_channel_factory_create_channel( grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory, const char *target, grpc_client_channel_type type, const grpc_channel_args *args) { - grpc_channel *channel = - grpc_channel_create(exec_ctx, target, args, GRPC_CLIENT_CHANNEL, NULL); - grpc_resolver *resolver = grpc_resolver_create(target, args); - if (resolver == NULL) { - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, - "client_channel_factory_create_channel"); - return NULL; - } - grpc_client_channel_finish_initialization( - exec_ctx, grpc_channel_get_channel_stack(channel), resolver, cc_factory); - GRPC_RESOLVER_UNREF(exec_ctx, resolver, "create_channel"); + // Add channel arg containing the server URI. + grpc_arg arg; + arg.type = GRPC_ARG_STRING; + arg.key = GRPC_ARG_SERVER_URI; + arg.value.string = (char *)target; + grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1); + grpc_channel *channel = grpc_channel_create(exec_ctx, target, new_args, + GRPC_CLIENT_CHANNEL, NULL); + grpc_channel_args_destroy(new_args); return channel; } @@ -101,8 +98,14 @@ grpc_channel *grpc_insecure_channel_create(const char *target, GPR_ASSERT(reserved == NULL); grpc_client_channel_factory *factory = (grpc_client_channel_factory *)&client_channel_factory; + // Add channel arg containing the client channel factory. + grpc_arg arg = grpc_client_channel_factory_create_channel_arg(factory); + grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1); + // Create channel. grpc_channel *channel = client_channel_factory_create_channel( - &exec_ctx, factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, args); + &exec_ctx, factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args); + // Clean up. + grpc_channel_args_destroy(new_args); grpc_client_channel_factory_unref(&exec_ctx, factory); grpc_exec_ctx_finish(&exec_ctx); return channel != NULL ? 
channel : grpc_lame_client_channel_create( diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c index 35e1e1f716..f35439cd44 100644 --- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c +++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c @@ -39,7 +39,6 @@ #include <grpc/support/string_util.h> #include "src/core/ext/client_channel/client_channel.h" -#include "src/core/ext/client_channel/resolver_registry.h" #include "src/core/ext/transport/chttp2/client/chttp2_connector.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/security/credentials/credentials.h" @@ -69,11 +68,10 @@ static void client_channel_factory_unref( } } -static void create_handshakers(grpc_exec_ctx *exec_ctx, - void *security_connector, - grpc_handshake_manager *handshake_mgr) { - grpc_channel_security_connector_create_handshakers( - exec_ctx, security_connector, handshake_mgr); +static void add_handshakers(grpc_exec_ctx *exec_ctx, void *security_connector, + grpc_handshake_manager *handshake_mgr) { + grpc_channel_security_connector_add_handshakers(exec_ctx, security_connector, + handshake_mgr); } static grpc_subchannel *client_channel_factory_create_subchannel( @@ -81,7 +79,7 @@ static grpc_subchannel *client_channel_factory_create_subchannel( const grpc_subchannel_args *args) { client_channel_factory *f = (client_channel_factory *)cc_factory; grpc_connector *connector = grpc_chttp2_connector_create( - exec_ctx, args->server_name, create_handshakers, f->security_connector); + exec_ctx, add_handshakers, f->security_connector); grpc_subchannel *s = grpc_subchannel_create(exec_ctx, connector, args); grpc_connector_unref(exec_ctx, connector); return s; @@ -91,18 +89,15 @@ static grpc_channel *client_channel_factory_create_channel( grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory, const char *target, grpc_client_channel_type type, const grpc_channel_args *args) { - client_channel_factory *f = (client_channel_factory *)cc_factory; - grpc_channel *channel = - grpc_channel_create(exec_ctx, target, args, GRPC_CLIENT_CHANNEL, NULL); - grpc_resolver *resolver = grpc_resolver_create(target, args); - if (resolver == NULL) { - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, - "client_channel_factory_create_channel"); - return NULL; - } - grpc_client_channel_finish_initialization( - exec_ctx, grpc_channel_get_channel_stack(channel), resolver, &f->base); - GRPC_RESOLVER_UNREF(exec_ctx, resolver, "create_channel"); + // Add channel arg containing the server URI. + grpc_arg arg; + arg.type = GRPC_ARG_STRING; + arg.key = GRPC_ARG_SERVER_URI; + arg.value.string = (char *)target; + grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1); + grpc_channel *channel = grpc_channel_create(exec_ctx, target, new_args, + GRPC_CLIENT_CHANNEL, NULL); + grpc_channel_args_destroy(new_args); return channel; } @@ -143,14 +138,6 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds, return grpc_lame_client_channel_create( target, GRPC_STATUS_INTERNAL, "Failed to create security connector."); } - grpc_arg connector_arg = - grpc_security_connector_to_arg(&security_connector->base); - grpc_channel_args *new_args = grpc_channel_args_copy_and_add( - new_args_from_connector != NULL ? 
new_args_from_connector : args, - &connector_arg, 1); - if (new_args_from_connector != NULL) { - grpc_channel_args_destroy(new_args_from_connector); - } // Create client channel factory. client_channel_factory *f = gpr_malloc(sizeof(*f)); memset(f, 0, sizeof(*f)); @@ -159,13 +146,24 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds, GRPC_SECURITY_CONNECTOR_REF(&security_connector->base, "grpc_secure_channel_create"); f->security_connector = security_connector; + // Add channel args containing the client channel factory and security + // connector. + grpc_arg new_args[2]; + new_args[0] = grpc_client_channel_factory_create_channel_arg(&f->base); + new_args[1] = grpc_security_connector_to_arg(&security_connector->base); + grpc_channel_args *args_copy = grpc_channel_args_copy_and_add( + new_args_from_connector != NULL ? new_args_from_connector : args, + new_args, GPR_ARRAY_SIZE(new_args)); + if (new_args_from_connector != NULL) { + grpc_channel_args_destroy(new_args_from_connector); + } // Create channel. grpc_channel *channel = client_channel_factory_create_channel( - &exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args); + &exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, args_copy); // Clean up. GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base, "secure_client_channel_factory_create_channel"); - grpc_channel_args_destroy(new_args); + grpc_channel_args_destroy(args_copy); grpc_client_channel_factory_unref(&exec_ctx, &f->base); grpc_exec_ctx_finish(&exec_ctx); return channel; /* may be NULL */ diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c index 8ee7e29316..9af62d372e 100644 --- a/src/core/ext/transport/chttp2/server/chttp2_server.c +++ b/src/core/ext/transport/chttp2/server/chttp2_server.c @@ -53,13 +53,13 @@ #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/server.h" -void grpc_chttp2_server_handshaker_factory_create_handshakers( +void grpc_chttp2_server_handshaker_factory_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_chttp2_server_handshaker_factory *handshaker_factory, grpc_handshake_manager *handshake_mgr) { if (handshaker_factory != NULL) { - handshaker_factory->vtable->create_handshakers(exec_ctx, handshaker_factory, - handshake_mgr); + handshaker_factory->vtable->add_handshakers(exec_ctx, handshaker_factory, + handshake_mgr); } } @@ -139,7 +139,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, const char *error_str = grpc_error_string(error); gpr_log(GPR_ERROR, "Handshaking failed: %s", error_str); grpc_error_free_string(error_str); - if (error == GRPC_ERROR_NONE) { + if (error == GRPC_ERROR_NONE && args->endpoint != NULL) { // We were shut down after handshaking completed successfully, so // destroy the endpoint here. 
// TODO(ctiller): It is currently necessary to shutdown endpoints @@ -153,19 +153,26 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, gpr_free(args->read_buffer); } } else { - grpc_transport *transport = - grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 0); - grpc_server_setup_transport( - exec_ctx, connection_state->server_state->server, transport, - connection_state->accepting_pollset, args->args); - grpc_chttp2_transport_start_reading(exec_ctx, transport, args->read_buffer); - grpc_channel_args_destroy(args->args); + // If the handshaking succeeded but there is no endpoint, then the + // handshaker may have handed off the connection to some external + // code, so we can just clean up here without creating a transport. + if (args->endpoint != NULL) { + grpc_transport *transport = + grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 0); + grpc_server_setup_transport( + exec_ctx, connection_state->server_state->server, transport, + connection_state->accepting_pollset, args->args); + grpc_chttp2_transport_start_reading(exec_ctx, transport, + args->read_buffer); + grpc_channel_args_destroy(args->args); + } } pending_handshake_manager_remove_locked(connection_state->server_state, connection_state->handshake_mgr); gpr_mu_unlock(&connection_state->server_state->mu); grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr); grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp_server); + gpr_free(connection_state->acceptor); gpr_free(connection_state); } @@ -177,6 +184,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, if (state->shutdown) { gpr_mu_unlock(&state->mu); grpc_endpoint_destroy(exec_ctx, tcp); + gpr_free(acceptor); return; } grpc_handshake_manager *handshake_mgr = grpc_handshake_manager_create(); @@ -189,7 +197,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, connection_state->accepting_pollset = accepting_pollset; connection_state->acceptor = acceptor; connection_state->handshake_mgr = handshake_mgr; - grpc_chttp2_server_handshaker_factory_create_handshakers( + grpc_chttp2_server_handshaker_factory_add_handshakers( exec_ctx, state->handshaker_factory, connection_state->handshake_mgr); // TODO(roth): We should really get this timeout value from channel // args instead of hard-coding it. 
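A recurring mechanical change across these hunks is the closure API itself: a closure's scheduler is now bound when the closure is created, and grpc_closure_sched() replaces grpc_exec_ctx_sched(). Below is a minimal, hypothetical sketch of the new usage, restricted to calls that appear in this patch (grpc_closure_init, grpc_closure_create, grpc_closure_sched, grpc_schedule_on_exec_ctx); the struct, callback, and header paths are assumptions made for illustration and are not part of the change.

#include "src/core/lib/iomgr/closure.h"  /* assumed header location */
#include "src/core/lib/iomgr/exec_ctx.h" /* assumed header location */

typedef struct {
  grpc_closure done_closure; /* embedded closure, initialized before use */
} example_state;

static void example_on_done(grpc_exec_ctx *exec_ctx, void *arg,
                            grpc_error *error) {
  /* Runs on whichever scheduler was bound at init/create time. */
}

static void example_schedule(grpc_exec_ctx *exec_ctx, example_state *st) {
  /* Embedded closure: the scheduler is now an argument of grpc_closure_init. */
  grpc_closure_init(&st->done_closure, example_on_done, st,
                    grpc_schedule_on_exec_ctx);
  grpc_closure_sched(exec_ctx, &st->done_closure, GRPC_ERROR_NONE);

  /* One-shot heap closure: grpc_closure_create() takes the same scheduler
     argument, replacing the old create + grpc_exec_ctx_sched pair. */
  grpc_closure_sched(
      exec_ctx,
      grpc_closure_create(example_on_done, st, grpc_schedule_on_exec_ctx),
      GRPC_ERROR_NONE);
}
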
@@ -273,7 +281,8 @@ grpc_error *grpc_chttp2_server_add_port( state = gpr_malloc(sizeof(*state)); memset(state, 0, sizeof(*state)); grpc_closure_init(&state->tcp_server_shutdown_complete, - tcp_server_shutdown_complete, state); + tcp_server_shutdown_complete, state, + grpc_schedule_on_exec_ctx); err = grpc_tcp_server_create(exec_ctx, &state->tcp_server_shutdown_complete, args, &tcp_server); if (err != GRPC_ERROR_NONE) { diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.h b/src/core/ext/transport/chttp2/server/chttp2_server.h index 3073399267..aa364b565d 100644 --- a/src/core/ext/transport/chttp2/server/chttp2_server.h +++ b/src/core/ext/transport/chttp2/server/chttp2_server.h @@ -45,7 +45,7 @@ typedef struct grpc_chttp2_server_handshaker_factory grpc_chttp2_server_handshaker_factory; typedef struct { - void (*create_handshakers)( + void (*add_handshakers)( grpc_exec_ctx *exec_ctx, grpc_chttp2_server_handshaker_factory *handshaker_factory, grpc_handshake_manager *handshake_mgr); @@ -57,7 +57,7 @@ struct grpc_chttp2_server_handshaker_factory { const grpc_chttp2_server_handshaker_factory_vtable *vtable; }; -void grpc_chttp2_server_handshaker_factory_create_handshakers( +void grpc_chttp2_server_handshaker_factory_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_chttp2_server_handshaker_factory *handshaker_factory, grpc_handshake_manager *handshake_mgr); diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c index 85c21f0ca2..a33a7a3f7d 100644 --- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c +++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c @@ -54,12 +54,12 @@ typedef struct { grpc_server_security_connector *security_connector; } server_security_handshaker_factory; -static void server_security_handshaker_factory_create_handshakers( +static void server_security_handshaker_factory_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_chttp2_server_handshaker_factory *hf, grpc_handshake_manager *handshake_mgr) { server_security_handshaker_factory *handshaker_factory = (server_security_handshaker_factory *)hf; - grpc_server_security_connector_create_handshakers( + grpc_server_security_connector_add_handshakers( exec_ctx, handshaker_factory->security_connector, handshake_mgr); } @@ -74,7 +74,7 @@ static void server_security_handshaker_factory_destroy( static const grpc_chttp2_server_handshaker_factory_vtable server_security_handshaker_factory_vtable = { - server_security_handshaker_factory_create_handshakers, + server_security_handshaker_factory_add_handshakers, server_security_handshaker_factory_destroy}; int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr, diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 3e7c078d3c..488c3b93cc 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -73,20 +73,14 @@ static const grpc_transport_vtable vtable; static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error); static void write_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error); -static void write_action_end(grpc_exec_ctx *exec_ctx, void *t, - grpc_error *error); static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error); -static void read_action_begin(grpc_exec_ctx *exec_ctx, void *t, - grpc_error 
*error); static void read_action_locked(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error); static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs, grpc_error *error); -static void complete_fetch(grpc_exec_ctx *exec_ctx, void *gs, - grpc_error *error); /** Set a transport level setting, and push it to our peer */ static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_setting_id id, uint32_t value); @@ -112,12 +106,8 @@ static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx, void *byte_stream, grpc_error *error_ignored); -static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *t, - grpc_error *error); static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error); -static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *t, - grpc_error *error); static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error); @@ -166,8 +156,8 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx, and maybe they hold resources that need to be freed */ while (t->pings.next != &t->pings) { grpc_chttp2_outstanding_ping *ping = t->pings.next; - grpc_exec_ctx_sched(exec_ctx, ping->on_recv, - GRPC_ERROR_CREATE("Transport closed"), NULL); + grpc_closure_sched(exec_ctx, ping->on_recv, + GRPC_ERROR_CREATE("Transport closed")); ping->next->prev = ping->prev; ping->prev->next = ping->next; gpr_free(ping); @@ -246,18 +236,15 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_slice_buffer_init(&t->outbuf); grpc_chttp2_hpack_compressor_init(&t->hpack_compressor); - grpc_closure_init(&t->write_action_begin_locked, write_action_begin_locked, - t); - grpc_closure_init(&t->write_action, write_action, t); - grpc_closure_init(&t->write_action_end, write_action_end, t); - grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t); - grpc_closure_init(&t->read_action_begin, read_action_begin, t); - grpc_closure_init(&t->read_action_locked, read_action_locked, t); - grpc_closure_init(&t->benign_reclaimer, benign_reclaimer, t); - grpc_closure_init(&t->destructive_reclaimer, destructive_reclaimer, t); - grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t); + grpc_closure_init(&t->write_action, write_action, t, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&t->read_action_locked, read_action_locked, t, + grpc_combiner_scheduler(t->combiner, false)); + grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t, + grpc_combiner_scheduler(t->combiner, false)); grpc_closure_init(&t->destructive_reclaimer_locked, - destructive_reclaimer_locked, t); + destructive_reclaimer_locked, t, + grpc_combiner_scheduler(t->combiner, false)); grpc_chttp2_goaway_parser_init(&t->goaway_parser); grpc_chttp2_hpack_parser_init(&t->hpack_parser); @@ -395,9 +382,10 @@ static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp, static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) { grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt; - grpc_combiner_execute(exec_ctx, t->combiner, - grpc_closure_create(destroy_transport_locked, t), - GRPC_ERROR_NONE, false); + grpc_closure_sched(exec_ctx, grpc_closure_create( + destroy_transport_locked, t, + grpc_combiner_scheduler(t->combiner, false)), + GRPC_ERROR_NONE); } static void close_transport_locked(grpc_exec_ctx *exec_ctx, @@ -425,7 +413,6 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx, /* flush writable stream list to avoid dangling 
references */ grpc_chttp2_stream *s; while (grpc_chttp2_list_pop_writable_stream(t, &s)) { - grpc_chttp2_leave_writing_lists(exec_ctx, t, s); GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:close"); } end_all_the_calls(exec_ctx, t, GRPC_ERROR_REF(error)); @@ -472,8 +459,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, grpc_chttp2_data_parser_init(&s->data_parser); grpc_slice_buffer_init(&s->flow_controlled_buffer); s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); - grpc_closure_init(&s->complete_fetch, complete_fetch, s); - grpc_closure_init(&s->complete_fetch_locked, complete_fetch_locked, s); + grpc_closure_init(&s->complete_fetch_locked, complete_fetch_locked, s, + grpc_schedule_on_exec_ctx); GRPC_CHTTP2_REF_TRANSPORT(t, "stream"); @@ -521,10 +508,6 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp, } } - if (s->fail_pending_writes_on_writes_finished_error != NULL) { - GRPC_ERROR_UNREF(s->fail_pending_writes_on_writes_finished_error); - } - GPR_ASSERT(s->send_initial_metadata_finished == NULL); GPR_ASSERT(s->fetching_send_message == NULL); GPR_ASSERT(s->send_trailing_metadata_finished == NULL); @@ -552,9 +535,10 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs; s->destroy_stream_arg = and_free_memory; - grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s); - grpc_combiner_execute(exec_ctx, t->combiner, &s->destroy_stream, - GRPC_ERROR_NONE, false); + grpc_closure_sched( + exec_ctx, grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s, + grpc_combiner_scheduler(t->combiner, false)), + GRPC_ERROR_NONE); GPR_TIMER_END("destroy_stream", 0); } @@ -604,11 +588,13 @@ static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, write_state_name(t->write_state), write_state_name(st), reason)); t->write_state = st; - if (st == GRPC_CHTTP2_WRITE_STATE_IDLE && - t->close_transport_on_writes_finished != NULL) { - grpc_error *err = t->close_transport_on_writes_finished; - t->close_transport_on_writes_finished = NULL; - close_transport_locked(exec_ctx, t, err); + if (st == GRPC_CHTTP2_WRITE_STATE_IDLE) { + grpc_closure_list_sched(exec_ctx, &t->run_after_write); + if (t->close_transport_on_writes_finished != NULL) { + grpc_error *err = t->close_transport_on_writes_finished; + t->close_transport_on_writes_finished = NULL; + close_transport_locked(exec_ctx, t, err); + } } } @@ -621,9 +607,12 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx, case GRPC_CHTTP2_WRITE_STATE_IDLE: set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason); GRPC_CHTTP2_REF_TRANSPORT(t, "writing"); - grpc_combiner_execute_finally(exec_ctx, t->combiner, - &t->write_action_begin_locked, - GRPC_ERROR_NONE, covered_by_poller); + grpc_closure_sched( + exec_ctx, + grpc_closure_init( + &t->write_action_begin_locked, write_action_begin_locked, t, + grpc_combiner_finally_scheduler(t->combiner, covered_by_poller)), + GRPC_ERROR_NONE); break; case GRPC_CHTTP2_WRITE_STATE_WRITING: set_write_state( @@ -665,7 +654,7 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt, if (!t->closed && grpc_chttp2_begin_write(exec_ctx, t)) { set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, "begin writing"); - grpc_exec_ctx_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE); } else { set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE, "begin writing nothing"); 
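The same refactor on the combiner side: where this file previously called grpc_combiner_execute() / grpc_combiner_execute_finally() with a covered_by_poller flag, the closure is now created with a combiner scheduler and handed to grpc_closure_sched(), as in the destroy_transport and initiate_write hunks above. A hedged sketch of both variants follows, reusing only calls visible in this patch; the helper and callback names are hypothetical, and it assumes the includes already present in chttp2_transport.c (e.g. internal.h for grpc_chttp2_transport).

static void example_work_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                grpc_error *error) {
  grpc_chttp2_transport *t = arg;
  /* Runs serialized under t->combiner, so transport state is safe to touch. */
  (void)t;
}

static void example_combiner_schedule(grpc_exec_ctx *exec_ctx,
                                      grpc_chttp2_transport *t) {
  /* Ordinary combiner closure (old grpc_combiner_execute); the bool is the
     covered_by_poller hint carried over from the old call sites. */
  grpc_closure_sched(exec_ctx,
                     grpc_closure_create(
                         example_work_locked, t,
                         grpc_combiner_scheduler(t->combiner, false)),
                     GRPC_ERROR_NONE);
  /* "Finally" variant (old grpc_combiner_execute_finally): run only after the
     combiner's regular queue has been processed. */
  grpc_closure_sched(exec_ctx,
                     grpc_closure_create(
                         example_work_locked, t,
                         grpc_combiner_finally_scheduler(t->combiner, true)),
                     GRPC_ERROR_NONE);
}
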
@@ -677,19 +666,13 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt, static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) { grpc_chttp2_transport *t = gt; GPR_TIMER_BEGIN("write_action", 0); - grpc_endpoint_write(exec_ctx, t->ep, &t->outbuf, &t->write_action_end); + grpc_endpoint_write( + exec_ctx, t->ep, &t->outbuf, + grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t, + grpc_combiner_scheduler(t->combiner, false))); GPR_TIMER_END("write_action", 0); } -static void write_action_end(grpc_exec_ctx *exec_ctx, void *gt, - grpc_error *error) { - grpc_chttp2_transport *t = gt; - GPR_TIMER_BEGIN("write_action_end", 0); - grpc_combiner_execute(exec_ctx, t->combiner, &t->write_action_end_locked, - GRPC_ERROR_REF(error), false); - GPR_TIMER_END("write_action_end", 0); -} - static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp, grpc_error *error) { GPR_TIMER_BEGIN("terminate_writing_with_lock", 0); @@ -719,18 +702,24 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp, set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, "continue writing [!covered]"); GRPC_CHTTP2_REF_TRANSPORT(t, "writing"); - grpc_combiner_execute_finally(exec_ctx, t->combiner, - &t->write_action_begin_locked, - GRPC_ERROR_NONE, false); + grpc_closure_run( + exec_ctx, + grpc_closure_init( + &t->write_action_begin_locked, write_action_begin_locked, t, + grpc_combiner_finally_scheduler(t->combiner, false)), + GRPC_ERROR_NONE); break; case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER: GPR_TIMER_MARK("state=writing_stale_with_poller", 0); set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, "continue writing [covered]"); GRPC_CHTTP2_REF_TRANSPORT(t, "writing"); - grpc_combiner_execute_finally(exec_ctx, t->combiner, - &t->write_action_begin_locked, - GRPC_ERROR_NONE, true); + grpc_closure_run( + exec_ctx, + grpc_closure_init(&t->write_action_begin_locked, + write_action_begin_locked, t, + grpc_combiner_finally_scheduler(t->combiner, true)), + GRPC_ERROR_NONE); break; } @@ -825,7 +814,14 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx, } } +/* Flag that this closure barrier wants stats to be updated before finishing */ #define CLOSURE_BARRIER_STATS_BIT (1 << 0) +/* Flag that this closure barrier may be covering a write in a pollset, and so + we should not complete this closure until we can prove that the write got + scheduled */ +#define CLOSURE_BARRIER_MAY_COVER_WRITE (1 << 1) +/* First bit of the reference count, stored in the high order bits (with the low + bits being used for flags defined above) */ #define CLOSURE_BARRIER_FIRST_REF_BIT (1 << 16) static grpc_closure *add_closure_barrier(grpc_closure *closure) { @@ -852,6 +848,16 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx, return; } closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT; + if (grpc_http_trace) { + const char *errstr = grpc_error_string(error); + gpr_log(GPR_DEBUG, + "complete_closure_step: %p refs=%d flags=0x%04x desc=%s err=%s", + closure, + (int)(closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT), + (int)(closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT), + desc, errstr); + grpc_error_free_string(errstr); + } if (error != GRPC_ERROR_NONE) { if (closure->error_data.error == GRPC_ERROR_NONE) { closure->error_data.error = @@ -868,7 +874,13 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx, grpc_transport_move_stats(&s->stats, 
s->collecting_stats); s->collecting_stats = NULL; } - grpc_closure_run(exec_ctx, closure, closure->error_data.error); + if ((t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE) || + !(closure->next_data.scratch & CLOSURE_BARRIER_MAY_COVER_WRITE)) { + grpc_closure_run(exec_ctx, closure, closure->error_data.error); + } else { + grpc_closure_list_append(&t->run_after_write, closure, + closure->error_data.error); + } } } @@ -945,15 +957,6 @@ static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs, } } -static void complete_fetch(grpc_exec_ctx *exec_ctx, void *gs, - grpc_error *error) { - grpc_chttp2_stream *s = gs; - grpc_chttp2_transport *t = s->t; - grpc_combiner_execute(exec_ctx, t->combiner, &s->complete_fetch_locked, - GRPC_ERROR_REF(error), - s->complete_fetch_covered_by_poller); -} - static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {} static void log_metadata(const grpc_metadata_batch *md_batch, uint32_t id, @@ -989,7 +992,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, grpc_closure *on_complete = op->on_complete; if (on_complete == NULL) { - on_complete = grpc_closure_create(do_nothing, NULL); + on_complete = + grpc_closure_create(do_nothing, NULL, grpc_schedule_on_exec_ctx); } /* use final_data as a barrier until enqueue time; the inital counter is @@ -1013,6 +1017,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, if (op->send_initial_metadata != NULL) { GPR_ASSERT(s->send_initial_metadata_finished == NULL); + on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; s->send_initial_metadata_finished = add_closure_barrier(on_complete); s->send_initial_metadata = op->send_initial_metadata; const size_t metadata_size = @@ -1066,6 +1071,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, } if (op->send_message != NULL) { + on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; s->fetching_send_message_finished = add_closure_barrier(op->on_complete); if (s->write_closed) { grpc_chttp2_complete_closure_step( @@ -1103,6 +1109,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, if (op->send_trailing_metadata != NULL) { GPR_ASSERT(s->send_trailing_metadata_finished == NULL); + on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; s->send_trailing_metadata_finished = add_closure_barrier(on_complete); s->send_trailing_metadata = op->send_trailing_metadata; const size_t metadata_size = @@ -1189,13 +1196,15 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, gpr_free(str); } - grpc_closure_init(&op->transport_private.closure, perform_stream_op_locked, - op); op->transport_private.args[0] = gt; op->transport_private.args[1] = gs; GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op"); - grpc_combiner_execute(exec_ctx, t->combiner, &op->transport_private.closure, - GRPC_ERROR_NONE, op->covered_by_poller); + grpc_closure_sched( + exec_ctx, + grpc_closure_init( + &op->transport_private.closure, perform_stream_op_locked, op, + grpc_combiner_scheduler(t->combiner, op->covered_by_poller)), + GRPC_ERROR_NONE); GPR_TIMER_END("perform_stream_op", 0); } @@ -1224,7 +1233,7 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_outstanding_ping *ping; for (ping = t->pings.next; ping != &t->pings; ping = ping->next) { if (0 == memcmp(opaque_8bytes, ping->id, 8)) { - grpc_exec_ctx_sched(exec_ctx, ping->on_recv, GRPC_ERROR_NONE, NULL); + 
grpc_closure_sched(exec_ctx, ping->on_recv, GRPC_ERROR_NONE); ping->next->prev = ping->prev; ping->prev->next = ping->next; gpr_free(ping); @@ -1298,11 +1307,12 @@ static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, char *msg = grpc_transport_op_string(op); gpr_free(msg); op->transport_private.args[0] = gt; - grpc_closure_init(&op->transport_private.closure, perform_transport_op_locked, - op); GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op"); - grpc_combiner_execute(exec_ctx, t->combiner, &op->transport_private.closure, - GRPC_ERROR_NONE, false); + grpc_closure_sched( + exec_ctx, grpc_closure_init(&op->transport_private.closure, + perform_transport_op_locked, op, + grpc_combiner_scheduler(t->combiner, false)), + GRPC_ERROR_NONE); } /******************************************************************************* @@ -1406,7 +1416,6 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, } } if (grpc_chttp2_list_remove_writable_stream(t, s)) { - grpc_chttp2_leave_writing_lists(exec_ctx, t, s); GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:remove_stream"); } @@ -1537,41 +1546,9 @@ static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s, return error; } -void grpc_chttp2_leave_writing_lists(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { - if (s->need_fail_pending_writes_on_writes_finished) { - grpc_error *error = s->fail_pending_writes_on_writes_finished_error; - s->fail_pending_writes_on_writes_finished_error = NULL; - s->need_fail_pending_writes_on_writes_finished = false; - grpc_chttp2_fail_pending_writes(exec_ctx, t, s, error); - } -} - void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s, grpc_error *error) { - if (s->need_fail_pending_writes_on_writes_finished || - (t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE && - (s->included[GRPC_CHTTP2_LIST_WRITABLE] || - s->included[GRPC_CHTTP2_LIST_WRITING]))) { - /* If a write is in progress, and it involves this stream, wait for the - * write to complete before cancelling things out. 
If we don't do this, then - * our combiner lock might think that some operation on its queue might be - * covering a completion even though there is none, in which case we might - * offload to another thread, which isn't guarateed to exist */ - if (error != GRPC_ERROR_NONE) { - if (s->fail_pending_writes_on_writes_finished_error == GRPC_ERROR_NONE) { - s->fail_pending_writes_on_writes_finished_error = GRPC_ERROR_CREATE( - "Post-poned fail writes due to in-progress write"); - } - s->fail_pending_writes_on_writes_finished_error = grpc_error_add_child( - s->fail_pending_writes_on_writes_finished_error, error); - } - s->need_fail_pending_writes_on_writes_finished = true; - return; /* early out */ - } - error = removal_error(error, s, "Pending writes failed due to stream closure"); s->send_initial_metadata = NULL; @@ -1632,6 +1609,9 @@ void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx, if (s->id != 0) { remove_stream(exec_ctx, t, s->id, removal_error(GRPC_ERROR_REF(error), s, "Stream removed")); + } else { + /* Purge streams waiting on concurrency still waiting for id assignment */ + grpc_chttp2_list_remove_waiting_for_concurrency(t, s); } GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2"); } @@ -1808,19 +1788,6 @@ static void update_global_window(void *args, uint32_t id, void *stream) { * INPUT PROCESSING - PARSING */ -static void read_action_begin(grpc_exec_ctx *exec_ctx, void *tp, - grpc_error *error) { - /* Control flow: - reading_action_locked -> - (parse_unlocked -> post_parse_locked)? -> - post_reading_action_locked */ - GPR_TIMER_BEGIN("reading_action", 0); - grpc_chttp2_transport *t = tp; - grpc_combiner_execute(exec_ctx, t->combiner, &t->read_action_locked, - GRPC_ERROR_REF(error), false); - GPR_TIMER_END("reading_action", 0); -} - static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) { grpc_http_parser parser; @@ -1920,7 +1887,8 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp, grpc_slice_buffer_reset_and_unref(&t->read_buffer); if (keep_reading) { - grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->read_action_begin); + grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, + &t->read_action_locked); GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading"); } else { GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "reading_action"); @@ -2057,10 +2025,12 @@ static int incoming_byte_stream_next(grpc_exec_ctx *exec_ctx, bs->next_action.slice = slice; bs->next_action.max_size_hint = max_size_hint; bs->next_action.on_complete = on_complete; - grpc_closure_init(&bs->next_action.closure, incoming_byte_stream_next_locked, - bs); - grpc_combiner_execute(exec_ctx, bs->transport->combiner, - &bs->next_action.closure, GRPC_ERROR_NONE, false); + grpc_closure_sched( + exec_ctx, + grpc_closure_init( + &bs->next_action.closure, incoming_byte_stream_next_locked, bs, + grpc_combiner_scheduler(bs->transport->combiner, false)), + GRPC_ERROR_NONE); GPR_TIMER_END("incoming_byte_stream_next", 0); return 0; } @@ -2082,10 +2052,12 @@ static void incoming_byte_stream_destroy(grpc_exec_ctx *exec_ctx, GPR_TIMER_BEGIN("incoming_byte_stream_destroy", 0); grpc_chttp2_incoming_byte_stream *bs = (grpc_chttp2_incoming_byte_stream *)byte_stream; - grpc_closure_init(&bs->destroy_action, incoming_byte_stream_destroy_locked, - bs); - grpc_combiner_execute(exec_ctx, bs->transport->combiner, &bs->destroy_action, - GRPC_ERROR_NONE, false); + grpc_closure_sched( + exec_ctx, + grpc_closure_init( + &bs->destroy_action, incoming_byte_stream_destroy_locked, bs, + 
grpc_combiner_scheduler(bs->transport->combiner, false)), + GRPC_ERROR_NONE); GPR_TIMER_END("incoming_byte_stream_destroy", 0); } @@ -2093,7 +2065,7 @@ static void incoming_byte_stream_publish_error( grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs, grpc_error *error) { GPR_ASSERT(error != GRPC_ERROR_NONE); - grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error), NULL); + grpc_closure_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error)); bs->on_next = NULL; GRPC_ERROR_UNREF(bs->error); bs->error = error; @@ -2110,7 +2082,7 @@ void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx, bs->remaining_bytes -= (uint32_t)GRPC_SLICE_LENGTH(slice); if (bs->on_next != NULL) { *bs->next = slice; - grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE); bs->on_next = NULL; } else { grpc_slice_buffer_add(&bs->slices, slice); @@ -2178,7 +2150,7 @@ static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx, GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer"); grpc_resource_user_post_reclaimer(exec_ctx, grpc_endpoint_get_resource_user(t->ep), - false, &t->benign_reclaimer); + false, &t->benign_reclaimer_locked); } } @@ -2189,24 +2161,10 @@ static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer"); grpc_resource_user_post_reclaimer(exec_ctx, grpc_endpoint_get_resource_user(t->ep), - true, &t->destructive_reclaimer); + true, &t->destructive_reclaimer_locked); } } -static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_chttp2_transport *t = arg; - grpc_combiner_execute(exec_ctx, t->combiner, &t->benign_reclaimer_locked, - GRPC_ERROR_REF(error), false); -} - -static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_chttp2_transport *t = arg; - grpc_combiner_execute(exec_ctx, t->combiner, &t->destructive_reclaimer_locked, - GRPC_ERROR_REF(error), false); -} - static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_chttp2_transport *t = arg; @@ -2387,5 +2345,5 @@ void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx, grpc_slice_buffer_move_into(read_buffer, &t->read_buffer); gpr_free(read_buffer); } - read_action_begin(exec_ctx, t, GRPC_ERROR_NONE); + grpc_closure_sched(exec_ctx, &t->read_action_locked, GRPC_ERROR_NONE); } diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c index 6a9200b8db..64ce07b2f7 100644 --- a/src/core/ext/transport/chttp2/transport/hpack_parser.c +++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c @@ -1634,10 +1634,11 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx, however -- it might be that we receive a RST_STREAM following this and can avoid the extra write */ GRPC_CHTTP2_STREAM_REF(s, "final_rst"); - grpc_combiner_execute_finally( - exec_ctx, t->combiner, - grpc_closure_create(force_client_rst_stream, s), GRPC_ERROR_NONE, - false); + grpc_closure_sched( + exec_ctx, grpc_closure_create(force_client_rst_stream, s, + grpc_combiner_finally_scheduler( + t->combiner, false)), + GRPC_ERROR_NONE); } grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false, GRPC_ERROR_NONE); diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h index 6cba1e7fd2..a52acbacdb 100644 --- a/src/core/ext/transport/chttp2/transport/internal.h +++ 
b/src/core/ext/transport/chttp2/transport/internal.h @@ -212,10 +212,8 @@ struct grpc_chttp2_transport { grpc_closure write_action_begin_locked; grpc_closure write_action; - grpc_closure write_action_end; grpc_closure write_action_end_locked; - grpc_closure read_action_begin; grpc_closure read_action_locked; /** incoming read bytes */ @@ -327,16 +325,17 @@ struct grpc_chttp2_transport { */ grpc_error *close_transport_on_writes_finished; + /* a list of closures to run after writes are finished */ + grpc_closure_list run_after_write; + /* buffer pool state */ /** have we scheduled a benign cleanup? */ bool benign_reclaimer_registered; /** have we scheduled a destructive cleanup? */ bool destructive_reclaimer_registered; /** benign cleanup closure */ - grpc_closure benign_reclaimer; grpc_closure benign_reclaimer_locked; /** destructive cleanup closure */ - grpc_closure destructive_reclaimer; grpc_closure destructive_reclaimer_locked; }; @@ -409,9 +408,6 @@ struct grpc_chttp2_stream { grpc_error *read_closed_error; /** the error that resulted in this stream being write-closed */ grpc_error *write_closed_error; - /** should any writes be cleared once this stream becomes non-writable */ - bool need_fail_pending_writes_on_writes_finished; - grpc_error *fail_pending_writes_on_writes_finished_error; grpc_published_metadata_method published_metadata[2]; bool final_metadata_requested; @@ -496,6 +492,8 @@ void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t, grpc_chttp2_stream *s); int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t, grpc_chttp2_stream **s); +void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t, + grpc_chttp2_stream *s); void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t, grpc_chttp2_stream *s); @@ -692,9 +690,6 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s); -void grpc_chttp2_leave_writing_lists(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s); void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s, grpc_error *error); diff --git a/src/core/ext/transport/chttp2/transport/stream_lists.c b/src/core/ext/transport/chttp2/transport/stream_lists.c index 6d25b3ae57..a60264cc51 100644 --- a/src/core/ext/transport/chttp2/transport/stream_lists.c +++ b/src/core/ext/transport/chttp2/transport/stream_lists.c @@ -158,6 +158,11 @@ int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t, return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY); } +void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t, + grpc_chttp2_stream *s) { + stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY); +} + void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t, grpc_chttp2_stream *s) { stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT); diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index 769b229a0d..139e7387c4 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -208,7 +208,6 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx, GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing"); } } else { - grpc_chttp2_leave_writing_lists(exec_ctx, t, s); GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, 
"chttp2_writing:no_write"); } } @@ -253,7 +252,6 @@ void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1, GRPC_ERROR_REF(error)); } - grpc_chttp2_leave_writing_lists(exec_ctx, t, s); GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:end"); } grpc_slice_buffer_reset_and_unref(&t->outbuf); diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c index a4c110101e..df0a769f6c 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.c +++ b/src/core/ext/transport/cronet/transport/cronet_transport.c @@ -149,6 +149,9 @@ struct write_state { struct op_state { bool state_op_done[OP_NUM_OPS]; bool state_callback_received[OP_NUM_OPS]; + bool fail_state; + bool flush_read; + grpc_error *cancel_error; /* data structure for storing data coming from server */ struct read_state rs; /* data structure for storing data going to the server */ @@ -248,6 +251,12 @@ static void free_read_buffer(stream_obj *s) { } } +static grpc_error *make_error_with_desc(int error_code, const char *desc) { + grpc_error *error = GRPC_ERROR_CREATE(desc); + error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, error_code); + return error; +} + /* Add a new stream op to op storage. */ @@ -433,6 +442,18 @@ static void on_response_headers_received( grpc_mdstr_from_string(headers->headers[i].value))); } s->state.state_callback_received[OP_RECV_INITIAL_METADATA] = true; + if (!(s->state.state_op_done[OP_CANCEL_ERROR] || + s->state.state_callback_received[OP_FAILED])) { + /* Do an extra read to trigger on_succeeded() callback in case connection + is closed */ + GPR_ASSERT(s->state.rs.length_field_received == false); + s->state.rs.read_buffer = s->state.rs.grpc_header_bytes; + s->state.rs.received_bytes = 0; + s->state.rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES; + CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs); + cronet_bidirectional_stream_read(s->cbs, s->state.rs.read_buffer, + s->state.rs.remaining_bytes); + } gpr_mu_unlock(&s->mu); execute_from_storage(s); } @@ -464,7 +485,11 @@ static void on_read_completed(cronet_bidirectional_stream *stream, char *data, count); gpr_mu_lock(&s->mu); s->state.state_callback_received[OP_RECV_MESSAGE] = true; - if (count > 0) { + if (count > 0 && s->state.flush_read) { + CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs); + cronet_bidirectional_stream_read(s->cbs, s->state.rs.read_buffer, 4096); + gpr_mu_unlock(&s->mu); + } else if (count > 0) { s->state.rs.received_bytes += count; s->state.rs.remaining_bytes -= count; if (s->state.rs.remaining_bytes > 0) { @@ -479,6 +504,10 @@ static void on_read_completed(cronet_bidirectional_stream *stream, char *data, execute_from_storage(s); } } else { + if (s->state.flush_read) { + gpr_free(s->state.rs.read_buffer); + s->state.rs.read_buffer = NULL; + } s->state.rs.read_stream_closed = true; gpr_mu_unlock(&s->mu); execute_from_storage(s); @@ -508,10 +537,27 @@ static void on_response_trailers_received( grpc_mdstr_from_string(trailers->headers[i].key), grpc_mdstr_from_string(trailers->headers[i].value))); s->state.rs.trailing_metadata_valid = true; + if (0 == strcmp(trailers->headers[i].key, "grpc-status") && + 0 != strcmp(trailers->headers[i].value, "0")) { + s->state.fail_state = true; + } } s->state.state_callback_received[OP_RECV_TRAILING_METADATA] = true; - gpr_mu_unlock(&s->mu); - execute_from_storage(s); + /* Send a EOS when 
server terminates the stream (testServerFinishesRequest) to + * trigger on_succeeded */ + if (!s->state.state_op_done[OP_SEND_TRAILING_METADATA] && + !(s->state.state_op_done[OP_CANCEL_ERROR] || + s->state.state_callback_received[OP_FAILED])) { + CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_write (%p, 0)", s->cbs); + s->state.state_callback_received[OP_SEND_MESSAGE] = false; + cronet_bidirectional_stream_write(s->cbs, "", 0, true); + s->state.state_op_done[OP_SEND_TRAILING_METADATA] = true; + + gpr_mu_unlock(&s->mu); + } else { + gpr_mu_unlock(&s->mu); + execute_from_storage(s); + } } /* @@ -632,9 +678,9 @@ static bool op_can_be_run(grpc_transport_stream_op *curr_op, /* When call is canceled, every op can be run, except under following conditions */ - bool is_canceled_of_failed = stream_state->state_op_done[OP_CANCEL_ERROR] || + bool is_canceled_or_failed = stream_state->state_op_done[OP_CANCEL_ERROR] || stream_state->state_callback_received[OP_FAILED]; - if (is_canceled_of_failed) { + if (is_canceled_or_failed) { if (op_id == OP_SEND_INITIAL_METADATA) result = false; if (op_id == OP_SEND_MESSAGE) result = false; if (op_id == OP_SEND_TRAILING_METADATA) result = false; @@ -778,16 +824,10 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, op_can_be_run(stream_op, stream_state, &oas->state, OP_SEND_INITIAL_METADATA)) { CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_INITIAL_METADATA", oas); - /* This OP is the beginning. Reset various states */ - memset(&s->header_array, 0, sizeof(s->header_array)); - memset(&stream_state->rs, 0, sizeof(stream_state->rs)); - memset(&stream_state->ws, 0, sizeof(stream_state->ws)); - memset(stream_state->state_op_done, 0, sizeof(stream_state->state_op_done)); - memset(stream_state->state_callback_received, 0, - sizeof(stream_state->state_callback_received)); /* Start new cronet stream. 
It is destroyed in on_succeeded, on_canceled, * on_failed */ GPR_ASSERT(s->cbs == NULL); + GPR_ASSERT(!stream_state->state_op_done[OP_SEND_INITIAL_METADATA]); s->cbs = cronet_bidirectional_stream_create(s->curr_ct.engine, s->curr_gs, &cronet_callbacks); CRONET_LOG(GPR_DEBUG, "%p = cronet_bidirectional_stream_create()", s->cbs); @@ -808,15 +848,18 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, op_can_be_run(stream_op, stream_state, &oas->state, OP_RECV_INITIAL_METADATA)) { CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_INITIAL_METADATA", oas); - if (stream_state->state_op_done[OP_CANCEL_ERROR] || - stream_state->state_callback_received[OP_FAILED]) { - grpc_exec_ctx_sched(exec_ctx, stream_op->recv_initial_metadata_ready, - GRPC_ERROR_CANCELLED, NULL); + if (stream_state->state_op_done[OP_CANCEL_ERROR]) { + grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready, + GRPC_ERROR_CANCELLED); + } else if (stream_state->state_callback_received[OP_FAILED]) { + grpc_closure_sched( + exec_ctx, stream_op->recv_initial_metadata_ready, + make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable.")); } else { grpc_chttp2_incoming_metadata_buffer_publish( &oas->s->state.rs.initial_metadata, stream_op->recv_initial_metadata); - grpc_exec_ctx_sched(exec_ctx, stream_op->recv_initial_metadata_ready, - GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready, + GRPC_ERROR_NONE); } stream_state->state_op_done[OP_RECV_INITIAL_METADATA] = true; result = ACTION_TAKEN_NO_CALLBACK; @@ -865,19 +908,27 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, op_can_be_run(stream_op, stream_state, &oas->state, OP_RECV_MESSAGE)) { CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_MESSAGE", oas); - if (stream_state->state_op_done[OP_CANCEL_ERROR] || - stream_state->state_callback_received[OP_FAILED]) { - CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed."); - grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready, - GRPC_ERROR_CANCELLED, NULL); + if (stream_state->state_op_done[OP_CANCEL_ERROR]) { + CRONET_LOG(GPR_DEBUG, "Stream is cancelled."); + grpc_closure_sched(exec_ctx, stream_op->recv_message_ready, + GRPC_ERROR_CANCELLED); + stream_state->state_op_done[OP_RECV_MESSAGE] = true; + result = ACTION_TAKEN_NO_CALLBACK; + } else if (stream_state->state_callback_received[OP_FAILED]) { + CRONET_LOG(GPR_DEBUG, "Stream failed."); + grpc_closure_sched( + exec_ctx, stream_op->recv_message_ready, + make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable.")); stream_state->state_op_done[OP_RECV_MESSAGE] = true; + result = ACTION_TAKEN_NO_CALLBACK; } else if (stream_state->rs.read_stream_closed == true) { /* No more data will be received */ CRONET_LOG(GPR_DEBUG, "read stream closed"); - grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready, - GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, stream_op->recv_message_ready, + GRPC_ERROR_NONE); stream_state->state_op_done[OP_RECV_MESSAGE] = true; oas->state.state_op_done[OP_RECV_MESSAGE] = true; + result = ACTION_TAKEN_NO_CALLBACK; } else if (stream_state->rs.length_field_received == false) { if (stream_state->rs.received_bytes == GRPC_HEADER_SIZE_IN_BYTES && stream_state->rs.remaining_bytes == 0) { @@ -907,8 +958,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, &stream_state->rs.read_slice_buffer, 0); *((grpc_byte_buffer **)stream_op->recv_message) = (grpc_byte_buffer *)&stream_state->rs.sbs; - grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready, - 
GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, stream_op->recv_message_ready, + GRPC_ERROR_NONE); stream_state->state_op_done[OP_RECV_MESSAGE] = true; oas->state.state_op_done[OP_RECV_MESSAGE] = true; result = ACTION_TAKEN_NO_CALLBACK; @@ -942,14 +993,19 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, &stream_state->rs.read_slice_buffer, 0); *((grpc_byte_buffer **)stream_op->recv_message) = (grpc_byte_buffer *)&stream_state->rs.sbs; - grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready, - GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, stream_op->recv_message_ready, + GRPC_ERROR_NONE); stream_state->state_op_done[OP_RECV_MESSAGE] = true; oas->state.state_op_done[OP_RECV_MESSAGE] = true; - /* Clear read state of the stream, so next read op (if it were to come) - * will work */ - stream_state->rs.received_bytes = stream_state->rs.remaining_bytes = - stream_state->rs.length_field_received = 0; + /* Do an extra read to trigger on_succeeded() callback in case connection + is closed */ + stream_state->rs.read_buffer = stream_state->rs.grpc_header_bytes; + stream_state->rs.received_bytes = 0; + stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES; + stream_state->rs.length_field_received = false; + CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs); + cronet_bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer, + stream_state->rs.remaining_bytes); result = ACTION_TAKEN_NO_CALLBACK; } } else if (stream_op->recv_trailing_metadata && @@ -986,23 +1042,30 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, CRONET_LOG(GPR_DEBUG, "W: cronet_bidirectional_stream_cancel(%p)", s->cbs); if (s->cbs) { cronet_bidirectional_stream_cancel(s->cbs); + result = ACTION_TAKEN_WITH_CALLBACK; + } else { + result = ACTION_TAKEN_NO_CALLBACK; } stream_state->state_op_done[OP_CANCEL_ERROR] = true; - result = ACTION_TAKEN_WITH_CALLBACK; + if (!stream_state->cancel_error) { + stream_state->cancel_error = GRPC_ERROR_REF(stream_op->cancel_error); + } } else if (stream_op->on_complete && op_can_be_run(stream_op, stream_state, &oas->state, OP_ON_COMPLETE)) { CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas); - if (stream_state->state_op_done[OP_CANCEL_ERROR] || - stream_state->state_callback_received[OP_FAILED]) { - grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, - GRPC_ERROR_CANCELLED, NULL); + if (stream_state->state_op_done[OP_CANCEL_ERROR]) { + grpc_closure_sched(exec_ctx, stream_op->on_complete, + GRPC_ERROR_REF(stream_state->cancel_error)); + } else if (stream_state->state_callback_received[OP_FAILED]) { + grpc_closure_sched( + exec_ctx, stream_op->on_complete, + make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable.")); } else { /* All actions in this stream_op are complete. 
Call the on_complete * callback */ - grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE, - NULL); + grpc_closure_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE); } oas->state.state_op_done[OP_ON_COMPLETE] = true; oas->done = true; @@ -1017,6 +1080,15 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, make a note */ if (stream_op->recv_message) stream_state->state_op_done[OP_RECV_MESSAGE_AND_ON_COMPLETE] = true; + } else if (stream_state->fail_state && !stream_state->flush_read) { + CRONET_LOG(GPR_DEBUG, "running: %p flush read", oas); + if (stream_state->rs.read_buffer && + stream_state->rs.read_buffer != stream_state->rs.grpc_header_bytes) { + gpr_free(stream_state->rs.read_buffer); + stream_state->rs.read_buffer = NULL; + } + stream_state->rs.read_buffer = gpr_malloc(4096); + stream_state->flush_read = true; } else { result = NO_ACTION_POSSIBLE; } @@ -1042,6 +1114,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, memset(s->state.state_op_done, 0, sizeof(s->state.state_op_done)); memset(s->state.state_callback_received, 0, sizeof(s->state.state_callback_received)); + s->state.fail_state = s->state.flush_read = false; + s->state.cancel_error = NULL; gpr_mu_init(&s->mu); return 0; } @@ -1088,7 +1162,10 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, } static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, void *and_free_memory) {} + grpc_stream *gs, void *and_free_memory) { + stream_obj *s = (stream_obj *)gs; + GRPC_ERROR_UNREF(s->state.cancel_error); +} static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {} diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c index 999ad5f507..cddd84fcca 100644 --- a/src/core/lib/channel/channel_stack.c +++ b/src/core/lib/channel/channel_stack.c @@ -102,13 +102,11 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack, return CALL_ELEMS_FROM_STACK(call_stack) + index; } -void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs, - grpc_iomgr_cb_func destroy, void *destroy_arg, - const grpc_channel_filter **filters, - size_t filter_count, - const grpc_channel_args *channel_args, - grpc_transport *optional_transport, - const char *name, grpc_channel_stack *stack) { +grpc_error *grpc_channel_stack_init( + grpc_exec_ctx *exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy, + void *destroy_arg, const grpc_channel_filter **filters, size_t filter_count, + const grpc_channel_args *channel_args, grpc_transport *optional_transport, + const char *name, grpc_channel_stack *stack) { size_t call_size = ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) + ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element)); @@ -126,6 +124,7 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs, ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element)); /* init per-filter data */ + grpc_error *first_error = GRPC_ERROR_NONE; for (i = 0; i < filter_count; i++) { args.channel_stack = stack; args.channel_args = channel_args; @@ -134,7 +133,15 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs, args.is_last = i == (filter_count - 1); elems[i].filter = filters[i]; elems[i].channel_data = user_data; - elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args); + grpc_error *error = + elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args); + if (error != GRPC_ERROR_NONE) { + if 
(first_error == GRPC_ERROR_NONE) { + first_error = error; + } else { + GRPC_ERROR_UNREF(error); + } + } user_data += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data); call_size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data); } @@ -144,6 +151,7 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs, grpc_channel_stack_size(filters, filter_count)); stack->call_stack_size = call_size; + return first_error; } void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx, @@ -289,7 +297,8 @@ void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx, grpc_transport_stream_op *op = gpr_malloc(sizeof(*op)); memset(op, 0, sizeof(*op)); op->cancel_error = GRPC_ERROR_CANCELLED; - op->on_complete = grpc_closure_create(destroy_op, op); + op->on_complete = + grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx); elem->filter->start_transport_stream_op(exec_ctx, elem, op); } @@ -299,7 +308,8 @@ void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx, grpc_slice *optional_message) { grpc_transport_stream_op *op = gpr_malloc(sizeof(*op)); memset(op, 0, sizeof(*op)); - op->on_complete = grpc_closure_create(destroy_op, op); + op->on_complete = + grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx); grpc_transport_stream_op_add_cancellation_with_message(op, status, optional_message); elem->filter->start_transport_stream_op(exec_ctx, elem, op); @@ -311,7 +321,8 @@ void grpc_call_element_send_close_with_message(grpc_exec_ctx *exec_ctx, grpc_slice *optional_message) { grpc_transport_stream_op *op = gpr_malloc(sizeof(*op)); memset(op, 0, sizeof(*op)); - op->on_complete = grpc_closure_create(destroy_op, op); + op->on_complete = + grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx); grpc_transport_stream_op_add_close(op, status, optional_message); elem->filter->start_transport_stream_op(exec_ctx, elem, op); } diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h index 004643d45f..d9d3a85233 100644 --- a/src/core/lib/channel/channel_stack.h +++ b/src/core/lib/channel/channel_stack.h @@ -34,6 +34,13 @@ #ifndef GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H #define GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H +////////////////////////////////////////////////////////////////////////////// +// IMPORTANT NOTE: +// +// When you update this API, please make the corresponding changes to +// the C++ API in src/cpp/common/channel_filter.{h,cc} +////////////////////////////////////////////////////////////////////////////// + /* A channel filter defines how operations on a channel are implemented. Channel filters are chained together to create full channels, and if those chains are linear, then channel stacks provide a mechanism to minimize @@ -146,8 +153,9 @@ typedef struct { is_first, is_last designate this elements position in the stack, and are useful for asserting correct configuration by upper layer code. The filter does not need to do any chaining */ - void (*init_channel_elem)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, - grpc_channel_element_args *args); + grpc_error *(*init_channel_elem)(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args); /* Destroy per channel data. 
The filter does not need to do any chaining */ void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx, @@ -214,12 +222,11 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *stack, size_t i); size_t grpc_channel_stack_size(const grpc_channel_filter **filters, size_t filter_count); /* Initialize a channel stack given some filters */ -void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs, - grpc_iomgr_cb_func destroy, void *destroy_arg, - const grpc_channel_filter **filters, - size_t filter_count, const grpc_channel_args *args, - grpc_transport *optional_transport, - const char *name, grpc_channel_stack *stack); +grpc_error *grpc_channel_stack_init( + grpc_exec_ctx *exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy, + void *destroy_arg, const grpc_channel_filter **filters, size_t filter_count, + const grpc_channel_args *args, grpc_transport *optional_transport, + const char *name, grpc_channel_stack *stack); /* Destroy a channel stack */ void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_stack *stack); diff --git a/src/core/lib/channel/channel_stack_builder.c b/src/core/lib/channel/channel_stack_builder.c index eda4968f48..b959517afb 100644 --- a/src/core/lib/channel/channel_stack_builder.c +++ b/src/core/lib/channel/channel_stack_builder.c @@ -227,11 +227,10 @@ void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder *builder) { gpr_free(builder); } -void *grpc_channel_stack_builder_finish(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, - size_t prefix_bytes, int initial_refs, - grpc_iomgr_cb_func destroy, - void *destroy_arg) { +grpc_error *grpc_channel_stack_builder_finish( + grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, + size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy, + void *destroy_arg, void **result) { // count the number of filters size_t num_filters = 0; for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) { @@ -250,28 +249,35 @@ void *grpc_channel_stack_builder_finish(grpc_exec_ctx *exec_ctx, size_t channel_stack_size = grpc_channel_stack_size(filters, num_filters); // allocate memory, with prefix_bytes followed by channel_stack_size - char *result = gpr_malloc(prefix_bytes + channel_stack_size); + *result = gpr_malloc(prefix_bytes + channel_stack_size); // fetch a pointer to the channel stack grpc_channel_stack *channel_stack = - (grpc_channel_stack *)(result + prefix_bytes); + (grpc_channel_stack *)((char *)(*result) + prefix_bytes); // and initialize it - grpc_channel_stack_init(exec_ctx, initial_refs, destroy, - destroy_arg == NULL ? result : destroy_arg, filters, - num_filters, builder->args, builder->transport, - builder->name, channel_stack); - - // run post-initialization functions - i = 0; - for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) { - if (p->init != NULL) { - p->init(channel_stack, grpc_channel_stack_element(channel_stack, i), - p->init_arg); + grpc_error *error = grpc_channel_stack_init( + exec_ctx, initial_refs, destroy, + destroy_arg == NULL ? 
*result : destroy_arg, filters, num_filters, + builder->args, builder->transport, builder->name, channel_stack); + + if (error != GRPC_ERROR_NONE) { + grpc_channel_stack_destroy(exec_ctx, channel_stack); + gpr_free(*result); + *result = NULL; + } else { + // run post-initialization functions + i = 0; + for (filter_node *p = builder->begin.next; p != &builder->end; + p = p->next) { + if (p->init != NULL) { + p->init(channel_stack, grpc_channel_stack_element(channel_stack, i), + p->init_arg); + } + i++; } - i++; } grpc_channel_stack_builder_destroy(builder); gpr_free((grpc_channel_filter **)filters); - return result; + return error; } diff --git a/src/core/lib/channel/channel_stack_builder.h b/src/core/lib/channel/channel_stack_builder.h index 4a00f7bfdb..65bfebcabc 100644 --- a/src/core/lib/channel/channel_stack_builder.h +++ b/src/core/lib/channel/channel_stack_builder.h @@ -146,16 +146,15 @@ bool grpc_channel_stack_builder_append_filter( void grpc_channel_stack_builder_iterator_destroy( grpc_channel_stack_builder_iterator *iterator); -/// Destroy the builder, return the freshly minted channel stack +/// Destroy the builder, return the freshly minted channel stack in \a result. /// Allocates \a prefix_bytes bytes before the channel stack /// Returns the base pointer of the allocated block /// \a initial_refs, \a destroy, \a destroy_arg are as per /// grpc_channel_stack_init -void *grpc_channel_stack_builder_finish(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, - size_t prefix_bytes, int initial_refs, - grpc_iomgr_cb_func destroy, - void *destroy_arg); +grpc_error *grpc_channel_stack_builder_finish( + grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, + size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy, + void *destroy_arg, void **result); /// Destroy the builder without creating a channel stack void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder *builder); diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c index 2874d63fc7..35455d4908 100644 --- a/src/core/lib/channel/compress_filter.c +++ b/src/core/lib/channel/compress_filter.c @@ -269,8 +269,10 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, /* initialize members */ grpc_slice_buffer_init(&calld->slices); calld->has_compression_algorithm = 0; - grpc_closure_init(&calld->got_slice, got_slice, elem); - grpc_closure_init(&calld->send_done, send_done, elem); + grpc_closure_init(&calld->got_slice, got_slice, elem, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&calld->send_done, send_done, elem, + grpc_schedule_on_exec_ctx); return GRPC_ERROR_NONE; } @@ -285,9 +287,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, } /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { channel_data *channeld = elem->channel_data; channeld->enabled_algorithms_bitset = @@ -315,6 +317,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx, } GPR_ASSERT(!args->is_last); + return GRPC_ERROR_NONE; } /* Destructor for channel data */ diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c index 038e819f72..c2a36b5558 100644 --- a/src/core/lib/channel/connected_channel.c +++ b/src/core/lib/channel/connected_channel.c @@ -114,12 +114,13 @@ 
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, } /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { channel_data *cd = (channel_data *)elem->channel_data; GPR_ASSERT(args->is_last); cd->transport = NULL; + return GRPC_ERROR_NONE; } /* Destructor for channel_data */ diff --git a/src/core/lib/channel/context.h b/src/core/lib/channel/context.h index 071c5f695c..6c931ad28a 100644 --- a/src/core/lib/channel/context.h +++ b/src/core/lib/channel/context.h @@ -47,6 +47,9 @@ typedef enum { /// Value is a \a census_context. GRPC_CONTEXT_TRACING, + /// Reserved for traffic_class_context. + GRPC_CONTEXT_TRAFFIC, + GRPC_CONTEXT_COUNT } grpc_context_index; diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c index 0e703d8d27..902ebf1955 100644 --- a/src/core/lib/channel/deadline_filter.c +++ b/src/core/lib/channel/deadline_filter.c @@ -123,7 +123,8 @@ static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { static void inject_on_complete_cb(grpc_deadline_state* deadline_state, grpc_transport_stream_op* op) { deadline_state->next_on_complete = op->on_complete; - grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state); + grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state, + grpc_schedule_on_exec_ctx); op->on_complete = &deadline_state->on_complete; } @@ -172,8 +173,9 @@ void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state)); state->elem = elem; state->deadline = deadline; - grpc_closure_init(&state->closure, start_timer_after_init, state); - grpc_exec_ctx_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE, NULL); + grpc_closure_init(&state->closure, start_timer_after_init, state, + grpc_schedule_on_exec_ctx); + grpc_closure_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE); } } @@ -207,10 +209,11 @@ void grpc_deadline_state_client_start_transport_stream_op( // // Constructor for channel_data. Used for both client and server filters. -static void init_channel_elem(grpc_exec_ctx* exec_ctx, - grpc_channel_element* elem, - grpc_channel_element_args* args) { +static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, + grpc_channel_element* elem, + grpc_channel_element_args* args) { GPR_ASSERT(!args->is_last); + return GRPC_ERROR_NONE; } // Destructor for channel_data. Used for both client and server filters. 
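Note on the recurring init_channel_elem change above: every channel filter constructor now returns a grpc_error * so that stack construction can fail cleanly instead of asserting. A minimal, hypothetical filter constructor under the new contract might look like the following (the filter and its failure condition are illustrative only, not part of this diff):

    static grpc_error *example_init_channel_elem(grpc_exec_ctx *exec_ctx,
                                                 grpc_channel_element *elem,
                                                 grpc_channel_element_args *args) {
      /* Fail construction instead of asserting; grpc_channel_stack_init() keeps
         the first such error and grpc_channel_stack_builder_finish() then tears
         the partially-built stack down. */
      if (args->optional_transport == NULL) {
        return GRPC_ERROR_CREATE("example filter requires a transport");
      }
      return GRPC_ERROR_NONE; /* success path is unchanged */
    }

Filters with nothing to check, such as the deadline filter here, simply return GRPC_ERROR_NONE.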
@@ -289,7 +292,8 @@ static void server_start_transport_stream_op(grpc_exec_ctx* exec_ctx, calld->next_recv_initial_metadata_ready = op->recv_initial_metadata_ready; calld->recv_initial_metadata = op->recv_initial_metadata; grpc_closure_init(&calld->recv_initial_metadata_ready, - recv_initial_metadata_ready, elem); + recv_initial_metadata_ready, elem, + grpc_schedule_on_exec_ctx); op->recv_initial_metadata_ready = &calld->recv_initial_metadata_ready; } // Make sure we know when the call is complete, so that we can cancel diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c index 90626dc2d1..ff827527b3 100644 --- a/src/core/lib/channel/handshaker.c +++ b/src/core/lib/channel/handshaker.c @@ -49,21 +49,21 @@ void grpc_handshaker_init(const grpc_handshaker_vtable* vtable, handshaker->vtable = vtable; } -static void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker) { +void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker) { handshaker->vtable->destroy(exec_ctx, handshaker); } -static void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker) { +void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker) { handshaker->vtable->shutdown(exec_ctx, handshaker); } -static void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker, - grpc_tcp_server_acceptor* acceptor, - grpc_closure* on_handshake_done, - grpc_handshaker_args* args) { +void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker, + grpc_tcp_server_acceptor* acceptor, + grpc_closure* on_handshake_done, + grpc_handshaker_args* args) { handshaker->vtable->do_handshake(exec_ctx, handshaker, acceptor, on_handshake_done, args); } @@ -157,14 +157,15 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr, grpc_error* error) { GPR_ASSERT(mgr->index <= mgr->count); - // If we got an error or we've been shut down or we've finished the last - // handshaker, invoke the on_handshake_done callback. Otherwise, call the - // next handshaker. - if (error != GRPC_ERROR_NONE || mgr->shutdown || mgr->index == mgr->count) { + // If we got an error or we've been shut down or we're exiting early or + // we've finished the last handshaker, invoke the on_handshake_done + // callback. Otherwise, call the next handshaker. + if (error != GRPC_ERROR_NONE || mgr->shutdown || mgr->args.exit_early || + mgr->index == mgr->count) { // Cancel deadline timer, since we're invoking the on_handshake_done // callback now. grpc_timer_cancel(exec_ctx, &mgr->deadline_timer); - grpc_exec_ctx_sched(exec_ctx, &mgr->on_handshake_done, error, NULL); + grpc_closure_sched(exec_ctx, &mgr->on_handshake_done, error); mgr->shutdown = true; } else { grpc_handshaker_do_handshake(exec_ctx, mgr->handshakers[mgr->index], @@ -217,8 +218,10 @@ void grpc_handshake_manager_do_handshake( grpc_slice_buffer_init(mgr->args.read_buffer); // Initialize state needed for calling handshakers. mgr->acceptor = acceptor; - grpc_closure_init(&mgr->call_next_handshaker, call_next_handshaker, mgr); - grpc_closure_init(&mgr->on_handshake_done, on_handshake_done, &mgr->args); + grpc_closure_init(&mgr->call_next_handshaker, call_next_handshaker, mgr, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&mgr->on_handshake_done, on_handshake_done, &mgr->args, + grpc_schedule_on_exec_ctx); // Start deadline timer, which owns a ref. 
gpr_ref(&mgr->refs); grpc_timer_init(exec_ctx, &mgr->deadline_timer, diff --git a/src/core/lib/channel/handshaker.h b/src/core/lib/channel/handshaker.h index ebbc1ff7f3..450b7adaee 100644 --- a/src/core/lib/channel/handshaker.h +++ b/src/core/lib/channel/handshaker.h @@ -72,6 +72,9 @@ typedef struct { grpc_endpoint* endpoint; grpc_channel_args* args; grpc_slice_buffer* read_buffer; + // A handshaker may set this to true before invoking on_handshake_done + // to indicate that subsequent handshakers should be skipped. + bool exit_early; // User data passed through the handshake manager. Not used by // individual handshakers. void* user_data; @@ -105,6 +108,16 @@ struct grpc_handshaker { void grpc_handshaker_init(const grpc_handshaker_vtable* vtable, grpc_handshaker* handshaker); +void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker); +void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker); +void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker, + grpc_tcp_server_acceptor* acceptor, + grpc_closure* on_handshake_done, + grpc_handshaker_args* args); + /// /// grpc_handshake_manager /// diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c index fd8b46afcb..c37cab3939 100644 --- a/src/core/lib/channel/http_client_filter.c +++ b/src/core/lib/channel/http_client_filter.c @@ -352,12 +352,17 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, calld->send_message_blocked = false; grpc_slice_buffer_init(&calld->slices); grpc_closure_init(&calld->hc_on_recv_initial_metadata, - hc_on_recv_initial_metadata, elem); + hc_on_recv_initial_metadata, elem, + grpc_schedule_on_exec_ctx); grpc_closure_init(&calld->hc_on_recv_trailing_metadata, - hc_on_recv_trailing_metadata, elem); - grpc_closure_init(&calld->hc_on_complete, hc_on_complete, elem); - grpc_closure_init(&calld->got_slice, got_slice, elem); - grpc_closure_init(&calld->send_done, send_done, elem); + hc_on_recv_trailing_metadata, elem, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&calld->hc_on_complete, hc_on_complete, elem, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&calld->got_slice, got_slice, elem, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&calld->send_done, send_done, elem, + grpc_schedule_on_exec_ctx); return GRPC_ERROR_NONE; } @@ -457,9 +462,9 @@ static grpc_mdstr *user_agent_from_args(const grpc_channel_args *args, } /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { channel_data *chand = elem->channel_data; GPR_ASSERT(!args->is_last); GPR_ASSERT(args->optional_transport != NULL); @@ -470,6 +475,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx, GRPC_MDSTR_USER_AGENT, user_agent_from_args(args->channel_args, args->optional_transport->vtable->name)); + return GRPC_ERROR_NONE; } /* Destructor for channel data */ diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c index b42ff06039..a6d720530a 100644 --- a/src/core/lib/channel/http_server_filter.c +++ b/src/core/lib/channel/http_server_filter.c @@ -334,9 +334,12 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, call_data *calld = elem->call_data; /* initialize members */ memset(calld, 0, sizeof(*calld)); - 
grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem); - grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem); - grpc_closure_init(&calld->hs_recv_message_ready, hs_recv_message_ready, elem); + grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&calld->hs_recv_message_ready, hs_recv_message_ready, elem, + grpc_schedule_on_exec_ctx); grpc_slice_buffer_init(&calld->read_slice_buffer); return GRPC_ERROR_NONE; } @@ -350,10 +353,11 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, } /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { GPR_ASSERT(!args->is_last); + return GRPC_ERROR_NONE; } /* Destructor for channel data */ diff --git a/src/core/lib/channel/message_size_filter.c b/src/core/lib/channel/message_size_filter.c index 1cf68d790d..b5e882de52 100644 --- a/src/core/lib/channel/message_size_filter.c +++ b/src/core/lib/channel/message_size_filter.c @@ -124,7 +124,7 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data, gpr_free(message_string); } // Invoke the next callback. - grpc_exec_ctx_sched(exec_ctx, calld->next_recv_message_ready, error, NULL); + grpc_closure_sched(exec_ctx, calld->next_recv_message_ready, error); } // Start transport stream op. @@ -160,7 +160,8 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, channel_data* chand = elem->channel_data; call_data* calld = elem->call_data; calld->next_recv_message_ready = NULL; - grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem); + grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem, + grpc_schedule_on_exec_ctx); // Get max sizes from channel data, then merge in per-method config values. // Note: Per-method config is only available on the client, so we // apply the max request size to the send limit and the max response @@ -192,9 +193,9 @@ static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, void* ignored) {} // Constructor for channel_data. -static void init_channel_elem(grpc_exec_ctx* exec_ctx, - grpc_channel_element* elem, - grpc_channel_element_args* args) { +static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, + grpc_channel_element* elem, + grpc_channel_element_args* args) { GPR_ASSERT(!args->is_last); channel_data* chand = elem->channel_data; memset(chand, 0, sizeof(*chand)); @@ -231,6 +232,7 @@ static void init_channel_elem(grpc_exec_ctx* exec_ctx, grpc_service_config_destroy(service_config); } } + return GRPC_ERROR_NONE; } // Destructor for channel_data. 
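The grpc_closure_init / grpc_exec_ctx_sched rewrites repeated throughout these filters follow one mechanical pattern: a closure is bound to a scheduler at init time and then scheduled with grpc_closure_sched. A hedged sketch of the migration (my_cb and my_arg are placeholder names, not identifiers from this change):

    /* old:
     *   grpc_closure_init(&c, my_cb, my_arg);
     *   grpc_exec_ctx_sched(exec_ctx, &c, GRPC_ERROR_NONE, NULL);
     * new: bind the closure to a scheduler, then just sched it. */
    grpc_closure c;
    grpc_closure_init(&c, my_cb, my_arg, grpc_schedule_on_exec_ctx);
    grpc_closure_sched(exec_ctx, &c, GRPC_ERROR_NONE);

grpc_schedule_on_exec_ctx preserves the old behaviour of running the closure from the current exec_ctx; combiner- or workqueue-bound closures pass a different scheduler instead (see combiner.c and ev_epoll_linux.c further down).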
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c index fdb8abaa2d..581a74b73f 100644 --- a/src/core/lib/http/httpcli.c +++ b/src/core/lib/http/httpcli.c @@ -103,7 +103,7 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req, grpc_error *error) { grpc_polling_entity_del_from_pollset_set(exec_ctx, req->pollent, req->context->pollset_set); - grpc_exec_ctx_sched(exec_ctx, req->on_done, error, NULL); + grpc_closure_sched(exec_ctx, req->on_done, error); grpc_http_parser_destroy(&req->parser); if (req->addresses != NULL) { grpc_resolved_addresses_destroy(req->addresses); @@ -224,7 +224,8 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req, return; } addr = &req->addresses->addrs[req->next_address++]; - grpc_closure_init(&req->connected, on_connected, req); + grpc_closure_init(&req->connected, on_connected, req, + grpc_schedule_on_exec_ctx); grpc_arg arg; arg.key = GRPC_ARG_RESOURCE_QUOTA; arg.type = GRPC_ARG_POINTER; @@ -266,8 +267,9 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx, req->pollent = pollent; req->overall_error = GRPC_ERROR_NONE; req->resource_quota = grpc_resource_quota_internal_ref(resource_quota); - grpc_closure_init(&req->on_read, on_read, req); - grpc_closure_init(&req->done_write, done_write, req); + grpc_closure_init(&req->on_read, on_read, req, grpc_schedule_on_exec_ctx); + grpc_closure_init(&req->done_write, done_write, req, + grpc_schedule_on_exec_ctx); grpc_slice_buffer_init(&req->incoming); grpc_slice_buffer_init(&req->outgoing); grpc_iomgr_register_object(&req->iomgr_obj, name); @@ -277,8 +279,11 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx, GPR_ASSERT(pollent); grpc_polling_entity_add_to_pollset_set(exec_ctx, req->pollent, req->context->pollset_set); - grpc_resolve_address(exec_ctx, request->host, req->handshaker->default_port, - grpc_closure_create(on_resolved, req), &req->addresses); + grpc_resolve_address( + exec_ctx, request->host, req->handshaker->default_port, + req->context->pollset_set, + grpc_closure_create(on_resolved, req, grpc_schedule_on_exec_ctx), + &req->addresses); } void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c index 0ab34d00e4..6b197c2e35 100644 --- a/src/core/lib/http/httpcli_security_connector.c +++ b/src/core/lib/http/httpcli_security_connector.c @@ -60,9 +60,9 @@ static void httpcli_ssl_destroy(grpc_security_connector *sc) { gpr_free(sc); } -static void httpcli_ssl_create_handshakers( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - grpc_handshake_manager *handshake_mgr) { +static void httpcli_ssl_add_handshakers(grpc_exec_ctx *exec_ctx, + grpc_channel_security_connector *sc, + grpc_handshake_manager *handshake_mgr) { grpc_httpcli_ssl_channel_security_connector *c = (grpc_httpcli_ssl_channel_security_connector *)sc; tsi_handshaker *handshaker = NULL; @@ -74,8 +74,9 @@ static void httpcli_ssl_create_handshakers( tsi_result_to_string(result)); } } - grpc_security_create_handshakers(exec_ctx, handshaker, &sc->base, - handshake_mgr); + grpc_handshake_manager_add( + handshake_mgr, + grpc_security_handshaker_create(exec_ctx, handshaker, &sc->base)); } static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx, @@ -95,7 +96,7 @@ static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx, error = GRPC_ERROR_CREATE(msg); gpr_free(msg); } - grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL); + 
grpc_closure_sched(exec_ctx, on_peer_checked, error); tsi_peer_destruct(&peer); } @@ -132,7 +133,7 @@ static grpc_security_status httpcli_ssl_channel_security_connector_create( *sc = NULL; return GRPC_SECURITY_ERROR; } - c->base.create_handshakers = httpcli_ssl_create_handshakers; + c->base.add_handshakers = httpcli_ssl_add_handshakers; *sc = &c->base; return GRPC_SECURITY_OK; } @@ -185,8 +186,8 @@ static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg, GPR_ASSERT(httpcli_ssl_channel_security_connector_create( pem_root_certs, pem_root_certs_size, host, &sc) == GRPC_SECURITY_OK); - grpc_channel_security_connector_create_handshakers(exec_ctx, sc, - c->handshake_mgr); + grpc_channel_security_connector_add_handshakers(exec_ctx, sc, + c->handshake_mgr); grpc_handshake_manager_do_handshake( exec_ctx, c->handshake_mgr, tcp, NULL /* channel_args */, deadline, NULL /* acceptor */, on_handshake_done, c /* user_data */); diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c index c6ddc76732..da0ec878a3 100644 --- a/src/core/lib/iomgr/closure.c +++ b/src/core/lib/iomgr/closure.c @@ -37,10 +37,13 @@ #include "src/core/lib/profiling/timers.h" -void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb, - void *cb_arg) { +grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb, + void *cb_arg, + grpc_closure_scheduler *scheduler) { closure->cb = cb; closure->cb_arg = cb_arg; + closure->scheduler = scheduler; + return closure; } void grpc_closure_list_init(grpc_closure_list *closure_list) { @@ -105,11 +108,12 @@ static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg, cb(exec_ctx, cb_arg, error); } -grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) { +grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg, + grpc_closure_scheduler *scheduler) { wrapped_closure *wc = gpr_malloc(sizeof(*wc)); wc->cb = cb; wc->cb_arg = cb_arg; - grpc_closure_init(&wc->wrapper, closure_wrapper, wc); + grpc_closure_init(&wc->wrapper, closure_wrapper, wc, scheduler); return &wc->wrapper; } @@ -117,8 +121,30 @@ void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c, grpc_error *error) { GPR_TIMER_BEGIN("grpc_closure_run", 0); if (c != NULL) { - c->cb(exec_ctx, c->cb_arg, error); + c->scheduler->vtable->run(exec_ctx, c, error); + } else { + GRPC_ERROR_UNREF(error); } - GRPC_ERROR_UNREF(error); GPR_TIMER_END("grpc_closure_run", 0); } + +void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c, + grpc_error *error) { + GPR_TIMER_BEGIN("grpc_closure_sched", 0); + if (c != NULL) { + c->scheduler->vtable->sched(exec_ctx, c, error); + } else { + GRPC_ERROR_UNREF(error); + } + GPR_TIMER_END("grpc_closure_sched", 0); +} + +void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) { + grpc_closure *c = list->head; + while (c != NULL) { + grpc_closure *next = c->next_data.next; + c->scheduler->vtable->sched(exec_ctx, c, c->error_data.error); + c = next; + } + list->head = list->tail = NULL; +} diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h index 2b4b271eaa..1b5d9b20a0 100644 --- a/src/core/lib/iomgr/closure.h +++ b/src/core/lib/iomgr/closure.h @@ -59,6 +59,22 @@ typedef struct grpc_closure_list { typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error); +typedef struct grpc_closure_scheduler grpc_closure_scheduler; + +typedef struct grpc_closure_scheduler_vtable { + /* NOTE: for all these functions, closure->scheduler == the scheduler that was + 
used to find this vtable */ + void (*run)(grpc_exec_ctx *exec_ctx, grpc_closure *closure, + grpc_error *error); + void (*sched)(grpc_exec_ctx *exec_ctx, grpc_closure *closure, + grpc_error *error); +} grpc_closure_scheduler_vtable; + +/** Abstract type that can schedule closures for execution */ +struct grpc_closure_scheduler { + const grpc_closure_scheduler_vtable *vtable; +}; + /** A closure over a grpc_iomgr_cb_func. */ struct grpc_closure { /** Once queued, next indicates the next queued closure; before then, scratch @@ -75,6 +91,10 @@ struct grpc_closure { /** Arguments to be passed to "cb". */ void *cb_arg; + /** Scheduler to schedule against: NULL to schedule against current execution + context */ + grpc_closure_scheduler *scheduler; + /** Once queued, the result of the closure. Before then: scratch space */ union { grpc_error *error; @@ -82,12 +102,14 @@ struct grpc_closure { } error_data; }; -/** Initializes \a closure with \a cb and \a cb_arg. */ -void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb, - void *cb_arg); +/** Initializes \a closure with \a cb and \a cb_arg. Returns \a closure. */ +grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb, + void *cb_arg, + grpc_closure_scheduler *scheduler); /* Create a heap allocated closure: try to avoid except for very rare events */ -grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg); +grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg, + grpc_closure_scheduler *scheduler); #define GRPC_CLOSURE_LIST_INIT \ { NULL, NULL } @@ -115,4 +137,13 @@ bool grpc_closure_list_empty(grpc_closure_list list); void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_error *error); +/** Schedule a closure to be run. Does not need to be run from a safe point. */ +void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, + grpc_error *error); + +/** Schedule all closures in a list to be run. Does not need to be run from a + * safe point. 
*/ +void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, + grpc_closure_list *closure_list); + #endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */ diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c index cfc67020ae..c26a73b2b7 100644 --- a/src/core/lib/iomgr/combiner.c +++ b/src/core/lib/iomgr/combiner.c @@ -56,6 +56,10 @@ int grpc_combiner_trace = 0; struct grpc_combiner { grpc_combiner *next_combiner_on_this_exec_ctx; grpc_workqueue *optional_workqueue; + grpc_closure_scheduler uncovered_scheduler; + grpc_closure_scheduler covered_scheduler; + grpc_closure_scheduler uncovered_finally_scheduler; + grpc_closure_scheduler covered_finally_scheduler; gpr_mpscq queue; // state is: // lower bit - zero if orphaned (STATE_UNORPHANED) @@ -70,6 +74,26 @@ struct grpc_combiner { grpc_closure offload; }; +static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx, + grpc_closure *closure, grpc_error *error); +static void combiner_exec_covered(grpc_exec_ctx *exec_ctx, + grpc_closure *closure, grpc_error *error); +static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx, + grpc_closure *closure, + grpc_error *error); +static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx, + grpc_closure *closure, + grpc_error *error); + +static const grpc_closure_scheduler_vtable scheduler_uncovered = { + combiner_exec_uncovered, combiner_exec_uncovered}; +static const grpc_closure_scheduler_vtable scheduler_covered = { + combiner_exec_covered, combiner_exec_covered}; +static const grpc_closure_scheduler_vtable finally_scheduler_uncovered = { + combiner_finally_exec_uncovered, combiner_finally_exec_uncovered}; +static const grpc_closure_scheduler_vtable finally_scheduler_covered = { + combiner_finally_exec_covered, combiner_finally_exec_covered}; + static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error); typedef struct { @@ -102,11 +126,16 @@ grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) { lock->time_to_execute_final_list = false; lock->optional_workqueue = optional_workqueue; lock->final_list_covered_by_poller = false; + lock->uncovered_scheduler.vtable = &scheduler_uncovered; + lock->covered_scheduler.vtable = &scheduler_covered; + lock->uncovered_finally_scheduler.vtable = &finally_scheduler_uncovered; + lock->covered_finally_scheduler.vtable = &finally_scheduler_covered; gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED); gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0); gpr_mpscq_init(&lock->queue); grpc_closure_list_init(&lock->final_list); - grpc_closure_init(&lock->offload, offload, lock); + grpc_closure_init(&lock->offload, offload, lock, + grpc_workqueue_scheduler(lock->optional_workqueue)); GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock)); return lock; } @@ -148,9 +177,9 @@ static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx, } } -void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock, - grpc_closure *cl, grpc_error *error, - bool covered_by_poller) { +static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_combiner *lock, + grpc_closure *cl, grpc_error *error, + bool covered_by_poller) { GPR_TIMER_BEGIN("combiner.execute", 0); gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT); GRPC_COMBINER_TRACE(gpr_log( @@ -171,6 +200,24 @@ void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock, GPR_TIMER_END("combiner.execute", 0); } +#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \ + ((grpc_combiner *)(((char 
*)((closure)->scheduler)) - \ + offsetof(grpc_combiner, scheduler_name))) + +static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx, grpc_closure *cl, + grpc_error *error) { + combiner_exec(exec_ctx, + COMBINER_FROM_CLOSURE_SCHEDULER(cl, uncovered_scheduler), cl, + error, false); +} + +static void combiner_exec_covered(grpc_exec_ctx *exec_ctx, grpc_closure *cl, + grpc_error *error) { + combiner_exec(exec_ctx, + COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_scheduler), cl, + error, true); +} + static void move_next(grpc_exec_ctx *exec_ctx) { exec_ctx->active_combiner = exec_ctx->active_combiner->next_combiner_on_this_exec_ctx; @@ -188,8 +235,7 @@ static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) { move_next(exec_ctx); GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock, lock->optional_workqueue)); - grpc_workqueue_enqueue(exec_ctx, lock->optional_workqueue, &lock->offload, - GRPC_ERROR_NONE); + grpc_closure_sched(exec_ctx, &lock->offload, GRPC_ERROR_NONE); } bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) { @@ -312,23 +358,22 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) { } static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure, - grpc_error *error) { - grpc_combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure, - GRPC_ERROR_REF(error), false); -} + grpc_error *error); -void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock, - grpc_closure *closure, grpc_error *error, - bool covered_by_poller) { +static void combiner_execute_finally(grpc_exec_ctx *exec_ctx, + grpc_combiner *lock, grpc_closure *closure, + grpc_error *error, + bool covered_by_poller) { GRPC_COMBINER_TRACE(gpr_log( GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock, closure, exec_ctx->active_combiner, covered_by_poller)); GPR_TIMER_BEGIN("combiner.execute_finally", 0); if (exec_ctx->active_combiner != lock) { GPR_TIMER_MARK("slowpath", 0); - grpc_combiner_execute(exec_ctx, lock, - grpc_closure_create(enqueue_finally, closure), error, - false); + grpc_closure_sched( + exec_ctx, grpc_closure_create(enqueue_finally, closure, + grpc_combiner_scheduler(lock, false)), + error); GPR_TIMER_END("combiner.execute_finally", 0); return; } @@ -342,3 +387,36 @@ void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock, grpc_closure_list_append(&lock->final_list, closure, error); GPR_TIMER_END("combiner.execute_finally", 0); } + +static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure, + grpc_error *error) { + combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure, + GRPC_ERROR_REF(error), false); +} + +static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx, + grpc_closure *cl, + grpc_error *error) { + combiner_execute_finally(exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER( + cl, uncovered_finally_scheduler), + cl, error, false); +} + +static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx, + grpc_closure *cl, grpc_error *error) { + combiner_execute_finally( + exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_finally_scheduler), + cl, error, true); +} + +grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner, + bool covered_by_poller) { + return covered_by_poller ? &combiner->covered_scheduler + : &combiner->uncovered_scheduler; +} + +grpc_closure_scheduler *grpc_combiner_finally_scheduler( + grpc_combiner *combiner, bool covered_by_poller) { + return covered_by_poller ? 
&combiner->covered_finally_scheduler + : &combiner->uncovered_finally_scheduler; +} diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h index d04eeed83a..81dff85d40 100644 --- a/src/core/lib/iomgr/combiner.h +++ b/src/core/lib/iomgr/combiner.h @@ -50,14 +50,12 @@ grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue); // Destroy the lock void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock); -// Execute \a action within the lock. -void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock, - grpc_closure *closure, grpc_error *error, - bool covered_by_poller); -// Execute \a action within the lock just prior to unlocking. -void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock, - grpc_closure *closure, grpc_error *error, - bool covered_by_poller); +// Fetch a scheduler to schedule closures against +grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock, + bool covered_by_poller); +// Scheduler to execute \a action within the lock just prior to unlocking. +grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock, + bool covered_by_poller); bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx); diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c index 07fbfd849e..045001f6d8 100644 --- a/src/core/lib/iomgr/ev_epoll_linux.c +++ b/src/core/lib/iomgr/ev_epoll_linux.c @@ -69,6 +69,9 @@ static int grpc_polling_trace = 0; /* Disabled by default */ gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \ } +/* Uncomment the following enable extra checks on poll_object operations */ +/* #define PO_DEBUG */ + static int grpc_wakeup_signal = -1; static bool is_grpc_wakeup_signal_initialized = false; @@ -95,10 +98,42 @@ void grpc_use_signal(int signum) { struct polling_island; +typedef enum { + POLL_OBJ_FD, + POLL_OBJ_POLLSET, + POLL_OBJ_POLLSET_SET +} poll_obj_type; + +typedef struct poll_obj { +#ifdef PO_DEBUG + poll_obj_type obj_type; +#endif + gpr_mu mu; + struct polling_island *pi; +} poll_obj; + +const char *poll_obj_string(poll_obj_type po_type) { + switch (po_type) { + case POLL_OBJ_FD: + return "fd"; + case POLL_OBJ_POLLSET: + return "pollset"; + case POLL_OBJ_POLLSET_SET: + return "pollset_set"; + } + + GPR_UNREACHABLE_CODE(return "UNKNOWN"); +} + /******************************************************************************* * Fd Declarations */ + +#define FD_FROM_PO(po) ((grpc_fd *)(po)) + struct grpc_fd { + poll_obj po; + int fd; /* refst format: bit 0 : 1=Active / 0=Orphaned @@ -106,8 +141,6 @@ struct grpc_fd { Ref/Unref by two to avoid altering the orphaned bit */ gpr_atm refst; - gpr_mu mu; - /* Indicates that the fd is shutdown and that any pending read/write closures should fail */ bool shutdown; @@ -120,9 +153,6 @@ struct grpc_fd { grpc_closure *read_closure; grpc_closure *write_closure; - /* The polling island to which this fd belongs to (protected by mu) */ - struct polling_island *polling_island; - struct grpc_fd *freelist_next; grpc_closure *on_done_closure; @@ -172,6 +202,8 @@ static void fd_global_shutdown(void); /* This is also used as grpc_workqueue (by directly casing it) */ typedef struct polling_island { + grpc_closure_scheduler workqueue_scheduler; + gpr_mu mu; /* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement the refcount. 
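The polling island now embeds a grpc_closure_scheduler, using the same container-recovery idiom as the combiner schedulers earlier in this diff: the scheduler lives inside the owning object, and the vtable callback maps closure->scheduler back to that owner. A simplified sketch of the idiom (my_obj and my_sched_run are illustrative names, not code from this change):

    typedef struct my_obj {
      grpc_closure_scheduler scheduler; /* placed first so a plain cast works;
                                           otherwise recover the owner with
                                           offsetof(), as
                                           COMBINER_FROM_CLOSURE_SCHEDULER does */
      /* ... owner state ... */
    } my_obj;

    static void my_sched_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                             grpc_error *error) {
      my_obj *obj = (my_obj *)closure->scheduler;
      /* ... run or enqueue the closure against obj, consuming error ... */
    }

    static const grpc_closure_scheduler_vtable my_vtable = {my_sched_run,
                                                            my_sched_run};

This is also why workqueue_enqueue below no longer takes a grpc_workqueue * argument: it recovers the workqueue (the polling island) from the closure itself.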
@@ -225,41 +257,21 @@ struct grpc_pollset_worker { }; struct grpc_pollset { - gpr_mu mu; + poll_obj po; + grpc_pollset_worker root_worker; bool kicked_without_pollers; bool shutting_down; /* Is the pollset shutting down ? */ bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */ grpc_closure *shutdown_done; /* Called after after shutdown is complete */ - - /* The polling island to which this pollset belongs to */ - struct polling_island *polling_island; }; /******************************************************************************* * Pollset-set Declarations */ -/* TODO: sreek - Change the pollset_set implementation such that a pollset_set - * directly points to a polling_island (and adding an fd/pollset/pollset_set to - * the current pollset_set would result in polling island merges. This would - * remove the need to maintain fd_count here. This will also significantly - * simplify the grpc_fd structure since we would no longer need to explicitly - * maintain the orphaned state */ struct grpc_pollset_set { - gpr_mu mu; - - size_t pollset_count; - size_t pollset_capacity; - grpc_pollset **pollsets; - - size_t pollset_set_count; - size_t pollset_set_capacity; - struct grpc_pollset_set **pollset_sets; - - size_t fd_count; - size_t fd_capacity; - grpc_fd **fds; + poll_obj po; }; /******************************************************************************* @@ -295,6 +307,8 @@ static __thread polling_island *g_current_thread_polling_island; /* Forward declaration */ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi); +static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, + grpc_error *error); #ifdef GRPC_TSAN /* Currently TSAN may incorrectly flag data races between epoll_ctl and @@ -307,6 +321,9 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi); gpr_atm g_epoll_sync; #endif /* defined(GRPC_TSAN) */ +static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = { + workqueue_enqueue, workqueue_enqueue}; + static void pi_add_ref(polling_island *pi); static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi); @@ -519,6 +536,7 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx, *error = GRPC_ERROR_NONE; pi = gpr_malloc(sizeof(*pi)); + pi->workqueue_scheduler.vtable = &workqueue_scheduler_vtable; gpr_mu_init(&pi->mu); pi->fd_cnt = 0; pi->fd_capacity = 0; @@ -790,10 +808,10 @@ static polling_island *polling_island_merge(polling_island *p, return q; } -static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, - grpc_workqueue *workqueue, grpc_closure *closure, +static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_error *error) { GPR_TIMER_BEGIN("workqueue.enqueue", 0); + grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler; /* take a ref to the workqueue: otherwise it can happen that whatever events * this kicks off ends up destroying the workqueue before this function * completes */ @@ -810,6 +828,12 @@ static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, GPR_TIMER_END("workqueue.enqueue", 0); } +static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) { + polling_island *pi = (polling_island *)workqueue; + return workqueue == NULL ? 
grpc_schedule_on_exec_ctx + : &pi->workqueue_scheduler; +} + static grpc_error *polling_island_global_init() { grpc_error *error = GRPC_ERROR_NONE; @@ -915,7 +939,7 @@ static void fd_global_shutdown(void) { while (fd_freelist != NULL) { grpc_fd *fd = fd_freelist; fd_freelist = fd_freelist->freelist_next; - gpr_mu_destroy(&fd->mu); + gpr_mu_destroy(&fd->po.mu); gpr_free(fd); } gpr_mu_destroy(&fd_freelist_mu); @@ -933,13 +957,17 @@ static grpc_fd *fd_create(int fd, const char *name) { if (new_fd == NULL) { new_fd = gpr_malloc(sizeof(grpc_fd)); - gpr_mu_init(&new_fd->mu); + gpr_mu_init(&new_fd->po.mu); } - /* Note: It is not really needed to get the new_fd->mu lock here. If this is a - newly created fd (or an fd we got from the freelist), no one else would be - holding a lock to it anyway. */ - gpr_mu_lock(&new_fd->mu); + /* Note: It is not really needed to get the new_fd->po.mu lock here. If this + * is a newly created fd (or an fd we got from the freelist), no one else + * would be holding a lock to it anyway. */ + gpr_mu_lock(&new_fd->po.mu); + new_fd->po.pi = NULL; +#ifdef PO_DEBUG + new_fd->po.obj_type = POLL_OBJ_FD; +#endif gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1); new_fd->fd = fd; @@ -947,12 +975,11 @@ static grpc_fd *fd_create(int fd, const char *name) { new_fd->orphaned = false; new_fd->read_closure = CLOSURE_NOT_READY; new_fd->write_closure = CLOSURE_NOT_READY; - new_fd->polling_island = NULL; new_fd->freelist_next = NULL; new_fd->on_done_closure = NULL; new_fd->read_notifier_pollset = NULL; - gpr_mu_unlock(&new_fd->mu); + gpr_mu_unlock(&new_fd->po.mu); char *fd_name; gpr_asprintf(&fd_name, "%s fd=%d", name, fd); @@ -964,17 +991,13 @@ static grpc_fd *fd_create(int fd, const char *name) { return new_fd; } -static bool fd_is_orphaned(grpc_fd *fd) { - return (gpr_atm_acq_load(&fd->refst) & 1) == 0; -} - static int fd_wrapped_fd(grpc_fd *fd) { int ret_fd = -1; - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); if (!fd->orphaned) { ret_fd = fd->fd; } - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); return ret_fd; } @@ -986,7 +1009,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *error = GRPC_ERROR_NONE; polling_island *unref_pi = NULL; - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); fd->on_done_closure = on_done; /* If release_fd is not NULL, we should be relinquishing control of the file @@ -1006,25 +1029,24 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, /* Remove the fd from the polling island: - Get a lock on the latest polling island (i.e the last island in the - linked list pointed by fd->polling_island). This is the island that + linked list pointed by fd->po.pi). This is the island that would actually contain the fd - Remove the fd from the latest polling island - Unlock the latest polling island - - Set fd->polling_island to NULL (but remove the ref on the polling island + - Set fd->po.pi to NULL (but remove the ref on the polling island before doing this.) 
*/ - if (fd->polling_island != NULL) { - polling_island *pi_latest = polling_island_lock(fd->polling_island); + if (fd->po.pi != NULL) { + polling_island *pi_latest = polling_island_lock(fd->po.pi); polling_island_remove_fd_locked(pi_latest, fd, is_fd_closed, &error); gpr_mu_unlock(&pi_latest->mu); - unref_pi = fd->polling_island; - fd->polling_island = NULL; + unref_pi = fd->po.pi; + fd->po.pi = NULL; } - grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error), - NULL); + grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error)); - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); UNREF_BY(fd, 2, reason); /* Drop the reference */ if (unref_pi != NULL) { /* Unref stale polling island here, outside the fd lock above. @@ -1048,16 +1070,14 @@ static grpc_error *fd_shutdown_error(bool shutdown) { static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure **st, grpc_closure *closure) { if (fd->shutdown) { - grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"), - NULL); + grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown")); } else if (*st == CLOSURE_NOT_READY) { /* not ready ==> switch to a waiting state by setting the closure */ *st = closure; } else if (*st == CLOSURE_READY) { /* already ready ==> queue the closure to run immediately */ *st = CLOSURE_NOT_READY; - grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown), - NULL); + grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown)); } else { /* upcallptr was set to a different closure. This is an error! */ gpr_log(GPR_ERROR, @@ -1079,7 +1099,7 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, return 0; } else { /* waiting ==> queue closure */ - grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL); + grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown)); *st = CLOSURE_NOT_READY; return 1; } @@ -1089,23 +1109,23 @@ static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { grpc_pollset *notifier = NULL; - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); notifier = fd->read_notifier_pollset; - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); return notifier; } static bool fd_is_shutdown(grpc_fd *fd) { - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); const bool r = fd->shutdown; - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); return r; } /* Might be called multiple times */ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); /* Do the actual shutdown only once */ if (!fd->shutdown) { fd->shutdown = true; @@ -1116,28 +1136,28 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { set_ready_locked(exec_ctx, fd, &fd->read_closure); set_ready_locked(exec_ctx, fd, &fd->write_closure); } - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); } static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); notify_on_locked(exec_ctx, fd, &fd->read_closure, closure); - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); } static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); notify_on_locked(exec_ctx, fd, &fd->write_closure, closure); - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); } static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { - gpr_mu_lock(&fd->mu); - grpc_workqueue 
*workqueue = GRPC_WORKQUEUE_REF( - (grpc_workqueue *)fd->polling_island, "fd_get_workqueue"); - gpr_mu_unlock(&fd->mu); + gpr_mu_lock(&fd->po.mu); + grpc_workqueue *workqueue = + GRPC_WORKQUEUE_REF((grpc_workqueue *)fd->po.pi, "fd_get_workqueue"); + gpr_mu_unlock(&fd->po.mu); return workqueue; } @@ -1277,8 +1297,12 @@ static grpc_error *kick_poller(void) { } static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { - gpr_mu_init(&pollset->mu); - *mu = &pollset->mu; + gpr_mu_init(&pollset->po.mu); + *mu = &pollset->po.mu; + pollset->po.pi = NULL; +#ifdef PO_DEBUG + pollset->po.obj_type = POLL_OBJ_POLLSET; +#endif pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker; pollset->kicked_without_pollers = false; @@ -1286,8 +1310,6 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { pollset->shutting_down = false; pollset->finish_shutdown_called = false; pollset->shutdown_done = NULL; - - pollset->polling_island = NULL; } /* Convert a timespec to milliseconds: @@ -1317,26 +1339,26 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline, static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *notifier) { - /* Need the fd->mu since we might be racing with fd_notify_on_read */ - gpr_mu_lock(&fd->mu); + /* Need the fd->po.mu since we might be racing with fd_notify_on_read */ + gpr_mu_lock(&fd->po.mu); set_ready_locked(exec_ctx, fd, &fd->read_closure); fd->read_notifier_pollset = notifier; - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); } static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - /* Need the fd->mu since we might be racing with fd_notify_on_write */ - gpr_mu_lock(&fd->mu); + /* Need the fd->po.mu since we might be racing with fd_notify_on_write */ + gpr_mu_lock(&fd->po.mu); set_ready_locked(exec_ctx, fd, &fd->write_closure); - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); } static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, char *reason) { - if (ps->polling_island != NULL) { - PI_UNREF(exec_ctx, ps->polling_island, reason); + if (ps->po.pi != NULL) { + PI_UNREF(exec_ctx, ps->po.pi, reason); } - ps->polling_island = NULL; + ps->po.pi = NULL; } static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, @@ -1346,12 +1368,12 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, pollset->finish_shutdown_called = true; - /* Release the ref and set pollset->polling_island to NULL */ + /* Release the ref and set pollset->po.pi to NULL */ pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown"); - grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE); } -/* pollset->mu lock must be held by the caller before calling this */ +/* pollset->po.mu lock must be held by the caller before calling this */ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_closure *closure) { GPR_TIMER_BEGIN("pollset_shutdown", 0); @@ -1376,7 +1398,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, * here */ static void pollset_destroy(grpc_pollset *pollset) { GPR_ASSERT(!pollset_has_workers(pollset)); - gpr_mu_destroy(&pollset->mu); + gpr_mu_destroy(&pollset->po.mu); } static void pollset_reset(grpc_pollset *pollset) { @@ -1386,7 +1408,7 @@ static void pollset_reset(grpc_pollset *pollset) { pollset->finish_shutdown_called = false; pollset->kicked_without_pollers = false; pollset->shutdown_done = NULL; - 
GPR_ASSERT(pollset->polling_island == NULL); + GPR_ASSERT(pollset->po.pi == NULL); } static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx, @@ -1399,7 +1421,9 @@ static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx, workqueue_maybe_wakeup(pi); } grpc_closure *c = (grpc_closure *)n; - grpc_closure_run(exec_ctx, c, c->error_data.error); + grpc_error *error = c->error_data.error; + c->cb(exec_ctx, c->cb_arg, error); + GRPC_ERROR_UNREF(error); return true; } else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) { /* n == NULL might mean there's work but it's not available to be popped @@ -1426,7 +1450,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, GPR_TIMER_BEGIN("pollset_work_and_unlock", 0); /* We need to get the epoll_fd to wait on. The epoll_fd is in inside the - latest polling island pointed by pollset->polling_island. + latest polling island pointed by pollset->po.pi Since epoll_fd is immutable, we can read it without obtaining the polling island lock. There is however a possibility that the polling island (from @@ -1435,36 +1459,36 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, right-away from epoll_wait() and pick up the latest polling_island the next this function (i.e pollset_work_and_unlock()) is called */ - if (pollset->polling_island == NULL) { - pollset->polling_island = polling_island_create(exec_ctx, NULL, error); - if (pollset->polling_island == NULL) { + if (pollset->po.pi == NULL) { + pollset->po.pi = polling_island_create(exec_ctx, NULL, error); + if (pollset->po.pi == NULL) { GPR_TIMER_END("pollset_work_and_unlock", 0); return; /* Fatal error. We cannot continue */ } - PI_ADD_REF(pollset->polling_island, "ps"); + PI_ADD_REF(pollset->po.pi, "ps"); GRPC_POLLING_TRACE("pollset_work: pollset: %p created new pi: %p", - (void *)pollset, (void *)pollset->polling_island); + (void *)pollset, (void *)pollset->po.pi); } - pi = polling_island_maybe_get_latest(pollset->polling_island); + pi = polling_island_maybe_get_latest(pollset->po.pi); epoll_fd = pi->epoll_fd; - /* Update the pollset->polling_island since the island being pointed by - pollset->polling_island maybe older than the one pointed by pi) */ - if (pollset->polling_island != pi) { + /* Update the pollset->po.pi since the island being pointed by + pollset->po.pi maybe older than the one pointed by pi) */ + if (pollset->po.pi != pi) { /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the polling island to be deleted */ PI_ADD_REF(pi, "ps"); - PI_UNREF(exec_ctx, pollset->polling_island, "ps"); - pollset->polling_island = pi; + PI_UNREF(exec_ctx, pollset->po.pi, "ps"); + pollset->po.pi = pi; } /* Add an extra ref so that the island does not get destroyed (which means the epoll_fd won't be closed) while we are are doing an epoll_wait() on the epoll_fd */ PI_ADD_REF(pi, "ps_work"); - gpr_mu_unlock(&pollset->mu); + gpr_mu_unlock(&pollset->po.mu); /* If we get some workqueue work to do, it might end up completing an item on the completion queue, so there's no need to poll... so we skip that and @@ -1537,17 +1561,17 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, GPR_ASSERT(pi != NULL); /* Before leaving, release the extra ref we added to the polling island. It - is important to use "pi" here (i.e our old copy of pollset->polling_island + is important to use "pi" here (i.e our old copy of pollset->po.pi that we got before releasing the polling island lock). 
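An aside on the PI_ADD_REF-before-PI_UNREF ordering that the hunk above keeps while renaming polling_island to po.pi: when a pollset repoints itself at a newer island, taking the new reference first is what keeps that island alive even if the reference being dropped was the last thing keeping it reachable. A minimal standalone model of the idiom, using plain malloc/free rather than gRPC's polling_island type and PI_* macros:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for a refcounted polling island. */
    typedef struct island {
      int refs;
    } island;

    static island *island_create(void) {
      island *pi = malloc(sizeof(*pi));
      pi->refs = 1; /* creation reference */
      return pi;
    }

    static void island_ref(island *pi) { pi->refs++; }

    static void island_unref(island *pi) {
      if (--pi->refs == 0) {
        printf("island %p destroyed\n", (void *)pi);
        free(pi);
      }
    }

    /* Repoint *slot at 'latest'. Taking the new reference before dropping the
       old one means 'latest' cannot be destroyed out from under us, even if
       the only path to it was the reference we are about to drop. */
    static void repoint(island **slot, island *latest) {
      if (*slot != latest) {
        island_ref(latest);                      /* ADD_REF first ...  */
        if (*slot != NULL) island_unref(*slot);  /* ... UNREF second.  */
        *slot = latest;
      }
    }

    int main(void) {
      island *a = island_create();
      island *slot = NULL;
      repoint(&slot, a);  /* slot takes its own reference on 'a'          */
      island_unref(a);    /* drop the creation ref; 'a' lives via slot    */
      island *b = island_create();
      repoint(&slot, b);  /* 'a' destroyed here; slot now references 'b'  */
      island_unref(b);    /* drop the creation ref                        */
      island_unref(slot); /* drop slot's ref; 'b' destroyed               */
      return 0;
    }
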
This is because - pollset->polling_island pointer might get udpated in other parts of the + pollset->po.pi pointer might get udpated in other parts of the code when there is an island merge while we are doing epoll_wait() above */ PI_UNREF(exec_ctx, pi, "ps_work"); GPR_TIMER_END("pollset_work_and_unlock", 0); } -/* pollset->mu lock must be held by the caller before calling this. - The function pollset_work() may temporarily release the lock (pollset->mu) +/* pollset->po.mu lock must be held by the caller before calling this. + The function pollset_work() may temporarily release the lock (pollset->po.mu) during the course of its execution but it will always re-acquire the lock and ensure that it is held by the time the function returns */ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, @@ -1617,7 +1641,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, &g_orig_sigmask, &error); grpc_exec_ctx_flush(exec_ctx); - gpr_mu_lock(&pollset->mu); + gpr_mu_lock(&pollset->po.mu); /* Note: There is no need to reset worker.is_kicked to 0 since we are no longer going to use this worker */ @@ -1637,9 +1661,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0); finish_shutdown_locked(exec_ctx, pollset); - gpr_mu_unlock(&pollset->mu); + gpr_mu_unlock(&pollset->po.mu); grpc_exec_ctx_flush(exec_ctx); - gpr_mu_lock(&pollset->mu); + gpr_mu_lock(&pollset->po.mu); } *worker_hdl = NULL; @@ -1653,130 +1677,160 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, return error; } -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_fd *fd) { - GPR_TIMER_BEGIN("pollset_add_fd", 0); - - grpc_error *error = GRPC_ERROR_NONE; +static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag, + poll_obj_type bag_type, poll_obj *item, + poll_obj_type item_type) { + GPR_TIMER_BEGIN("add_poll_object", 0); - gpr_mu_lock(&pollset->mu); - gpr_mu_lock(&fd->mu); +#ifdef PO_DEBUG + GPR_ASSERT(item->obj_type == item_type); + GPR_ASSERT(bag->obj_type == bag_type); +#endif + grpc_error *error = GRPC_ERROR_NONE; polling_island *pi_new = NULL; + gpr_mu_lock(&bag->mu); + gpr_mu_lock(&item->mu); + retry: - /* 1) If fd->polling_island and pollset->polling_island are both non-NULL and - * equal, do nothing. - * 2) If fd->polling_island and pollset->polling_island are both NULL, create - * a new polling island (with a refcount of 2) and make the polling_island - * fields in both fd and pollset to point to the new island - * 3) If one of fd->polling_island or pollset->polling_island is NULL, update - * the NULL polling_island field to point to the non-NULL polling_island - * field (ensure that the refcount on the polling island is incremented by - * 1 to account for the newly added reference) - * 4) Finally, if fd->polling_island and pollset->polling_island are non-NULL - * and different, merge both the polling islands and update the - * polling_island fields in both fd and pollset to point to the merged - * polling island. 
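The rewritten comment block that follows generalizes the old fd/pollset rules to any item/bag pair of poll objects. As a way of reading those four cases, here is a deliberately simplified, standalone model of the decision tree plus the ref-before-unref fixup at the end; it leaves out the locking, the orphaned-fd early-out and the TSAN workaround that the real add_poll_object handles, and the names (poll_obj, island, island_merge) are illustrative rather than gRPC's own:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct island {
      int refs;
      struct island *merged_to; /* set once this island is merged into another */
    } island;

    typedef struct poll_obj {
      island *pi;
    } poll_obj;

    static island *island_create(void) {
      island *pi = malloc(sizeof(*pi));
      pi->refs = 0;
      pi->merged_to = NULL;
      return pi;
    }
    static void island_ref(island *pi) { pi->refs++; }
    static void island_unref(island *pi) {
      if (--pi->refs == 0) free(pi);
    }

    /* Chase merge links to the most recent island. */
    static island *island_latest(island *pi) {
      while (pi->merged_to != NULL) pi = pi->merged_to;
      return pi;
    }

    /* Merge two distinct islands; 'a' survives, 'b' forwards to it. */
    static island *island_merge(island *a, island *b) {
      b->merged_to = a;
      return a;
    }

    static void add_poll_object(poll_obj *bag, poll_obj *item) {
      island *pi_new;
      if (item->pi == bag->pi) {
        /* Cases 1 and 2: they already share an island, or both have none.
           A fresh island starts at zero refs here; the two ref bumps below
           bring it to the "refcount of 2" the comment describes. */
        pi_new = (item->pi == NULL) ? island_create() : item->pi;
      } else if (item->pi == NULL) {
        pi_new = island_latest(bag->pi);  /* case 3: adopt the bag's island  */
      } else if (bag->pi == NULL) {
        pi_new = island_latest(item->pi); /* case 3: adopt the item's island */
      } else {
        /* case 4: both have islands and they differ, so merge them */
        pi_new = island_merge(island_latest(item->pi), island_latest(bag->pi));
      }
      /* Point both sides at pi_new, always ref before unref. */
      if (item->pi != pi_new) {
        island_ref(pi_new);
        if (item->pi != NULL) island_unref(item->pi);
        item->pi = pi_new;
      }
      if (bag->pi != pi_new) {
        island_ref(pi_new);
        if (bag->pi != NULL) island_unref(bag->pi);
        bag->pi = pi_new;
      }
    }

    int main(void) {
      poll_obj fd = {NULL}, pollset = {NULL}, pollset_set = {NULL};
      add_poll_object(&pollset, &fd);          /* creates one shared island */
      add_poll_object(&pollset_set, &pollset); /* pollset_set adopts it     */
      printf("island %p now has %d refs\n", (void *)fd.pi, fd.pi->refs);
      return 0;
    }
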
+ /* + * 1) If item->pi and bag->pi are both non-NULL and equal, do nothing + * 2) If item->pi and bag->pi are both NULL, create a new polling island (with + * a refcount of 2) and point item->pi and bag->pi to the new island + * 3) If exactly one of item->pi or bag->pi is NULL, update it to point to + * the other's non-NULL pi + * 4) Finally if item->pi and bag-pi are non-NULL and not-equal, merge the + * polling islands and update item->pi and bag->pi to point to the new + * island */ - if (fd->orphaned) { - gpr_mu_unlock(&fd->mu); - gpr_mu_unlock(&pollset->mu); - /* early out */ + /* Early out if we are trying to add an 'fd' to a 'bag' but the fd is already + * orphaned */ + if (item_type == POLL_OBJ_FD && (FD_FROM_PO(item))->orphaned) { + gpr_mu_unlock(&item->mu); + gpr_mu_unlock(&bag->mu); return; } - if (fd->polling_island == pollset->polling_island) { - pi_new = fd->polling_island; + if (item->pi == bag->pi) { + pi_new = item->pi; if (pi_new == NULL) { - /* Unlock before creating a new polling island: the polling island will - create a workqueue which creates a file descriptor, and holding an fd - lock here can eventually cause a loop to appear to TSAN (making it - unhappy). We don't think it's a real loop (there's an epoch point where - that loop possibility disappears), but the advantages of keeping TSAN - happy outweigh any performance advantage we might have by keeping the - lock held. */ - gpr_mu_unlock(&fd->mu); - pi_new = polling_island_create(exec_ctx, fd, &error); - gpr_mu_lock(&fd->mu); - /* Need to reverify any assumptions made between the initial lock and - getting to this branch: if they've changed, we need to throw away our - work and figure things out again. */ - if (fd->polling_island != NULL) { - GRPC_POLLING_TRACE( - "pollset_add_fd: Raced creating new polling island. pi_new: %p " - "(fd: %d, pollset: %p)", - (void *)pi_new, fd->fd, (void *)pollset); - - /* No need to lock 'pi_new' here since this is a new polling island and - * no one has a reference to it yet */ - polling_island_remove_all_fds_locked(pi_new, true, &error); - - /* Ref and unref so that the polling island gets deleted during unref */ - PI_ADD_REF(pi_new, "dance_of_destruction"); - PI_UNREF(exec_ctx, pi_new, "dance_of_destruction"); - goto retry; + /* GPR_ASSERT(item->pi == bag->pi == NULL) */ + + /* If we are adding an fd to a bag (i.e pollset or pollset_set), then + * we need to do some extra work to make TSAN happy */ + if (item_type == POLL_OBJ_FD) { + /* Unlock before creating a new polling island: the polling island will + create a workqueue which creates a file descriptor, and holding an fd + lock here can eventually cause a loop to appear to TSAN (making it + unhappy). We don't think it's a real loop (there's an epoch point + where that loop possibility disappears), but the advantages of + keeping TSAN happy outweigh any performance advantage we might have + by keeping the lock held. */ + gpr_mu_unlock(&item->mu); + pi_new = polling_island_create(exec_ctx, FD_FROM_PO(item), &error); + gpr_mu_lock(&item->mu); + + /* Need to reverify any assumptions made between the initial lock and + getting to this branch: if they've changed, we need to throw away our + work and figure things out again. */ + if (item->pi != NULL) { + GRPC_POLLING_TRACE( + "add_poll_object: Raced creating new polling island. 
pi_new: %p " + "(fd: %d, %s: %p)", + (void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type), + (void *)bag); + /* No need to lock 'pi_new' here since this is a new polling island + * and no one has a reference to it yet */ + polling_island_remove_all_fds_locked(pi_new, true, &error); + + /* Ref and unref so that the polling island gets deleted during unref + */ + PI_ADD_REF(pi_new, "dance_of_destruction"); + PI_UNREF(exec_ctx, pi_new, "dance_of_destruction"); + goto retry; + } } else { - GRPC_POLLING_TRACE( - "pollset_add_fd: Created new polling island. pi_new: %p (fd: %d, " - "pollset: %p)", - (void *)pi_new, fd->fd, (void *)pollset); + pi_new = polling_island_create(exec_ctx, NULL, &error); } + + GRPC_POLLING_TRACE( + "add_poll_object: Created new polling island. pi_new: %p (%s: %p, " + "%s: %p)", + (void *)pi_new, poll_obj_string(item_type), (void *)item, + poll_obj_string(bag_type), (void *)bag); + } else { + GRPC_POLLING_TRACE( + "add_poll_object: Same polling island. pi: %p (%s, %s)", + (void *)pi_new, poll_obj_string(item_type), + poll_obj_string(bag_type)); + } + } else if (item->pi == NULL) { + /* GPR_ASSERT(bag->pi != NULL) */ + /* Make pi_new point to latest pi*/ + pi_new = polling_island_lock(bag->pi); + + if (item_type == POLL_OBJ_FD) { + grpc_fd *fd = FD_FROM_PO(item); + polling_island_add_fds_locked(pi_new, &fd, 1, true, &error); } - } else if (fd->polling_island == NULL) { - pi_new = polling_island_lock(pollset->polling_island); - polling_island_add_fds_locked(pi_new, &fd, 1, true, &error); - gpr_mu_unlock(&pi_new->mu); + gpr_mu_unlock(&pi_new->mu); GRPC_POLLING_TRACE( - "pollset_add_fd: fd->pi was NULL. pi_new: %p (fd: %d, pollset: %p, " - "pollset->pi: %p)", - (void *)pi_new, fd->fd, (void *)pollset, - (void *)pollset->polling_island); - } else if (pollset->polling_island == NULL) { - pi_new = polling_island_lock(fd->polling_island); + "add_poll_obj: item->pi was NULL. pi_new: %p (item(%s): %p, " + "bag(%s): %p)", + (void *)pi_new, poll_obj_string(item_type), (void *)item, + poll_obj_string(bag_type), (void *)bag); + } else if (bag->pi == NULL) { + /* GPR_ASSERT(item->pi != NULL) */ + /* Make pi_new to point to latest pi */ + pi_new = polling_island_lock(item->pi); gpr_mu_unlock(&pi_new->mu); - GRPC_POLLING_TRACE( - "pollset_add_fd: pollset->pi was NULL. pi_new: %p (fd: %d, pollset: " - "%p, fd->pi: %p", - (void *)pi_new, fd->fd, (void *)pollset, (void *)fd->polling_island); + "add_poll_obj: bag->pi was NULL. pi_new: %p (item(%s): %p, " + "bag(%s): %p)", + (void *)pi_new, poll_obj_string(item_type), (void *)item, + poll_obj_string(bag_type), (void *)bag); } else { - pi_new = polling_island_merge(fd->polling_island, pollset->polling_island, - &error); + pi_new = polling_island_merge(item->pi, bag->pi, &error); GRPC_POLLING_TRACE( - "pollset_add_fd: polling islands merged. pi_new: %p (fd: %d, pollset: " - "%p, fd->pi: %p, pollset->pi: %p)", - (void *)pi_new, fd->fd, (void *)pollset, (void *)fd->polling_island, - (void *)pollset->polling_island); + "add_poll_obj: polling islands merged. 
pi_new: %p (item(%s): %p, " + "bag(%s): %p)", + (void *)pi_new, poll_obj_string(item_type), (void *)item, + poll_obj_string(bag_type), (void *)bag); } - /* At this point, pi_new is the polling island that both fd->polling_island - and pollset->polling_island must be pointing to */ + /* At this point, pi_new is the polling island that both item->pi and bag->pi + MUST be pointing to */ - if (fd->polling_island != pi_new) { - PI_ADD_REF(pi_new, "fd"); - if (fd->polling_island != NULL) { - PI_UNREF(exec_ctx, fd->polling_island, "fd"); + if (item->pi != pi_new) { + PI_ADD_REF(pi_new, poll_obj_string(item_type)); + if (item->pi != NULL) { + PI_UNREF(exec_ctx, item->pi, poll_obj_string(item_type)); } - fd->polling_island = pi_new; + item->pi = pi_new; } - if (pollset->polling_island != pi_new) { - PI_ADD_REF(pi_new, "ps"); - if (pollset->polling_island != NULL) { - PI_UNREF(exec_ctx, pollset->polling_island, "ps"); + if (bag->pi != pi_new) { + PI_ADD_REF(pi_new, poll_obj_string(bag_type)); + if (bag->pi != NULL) { + PI_UNREF(exec_ctx, bag->pi, poll_obj_string(bag_type)); } - pollset->polling_island = pi_new; + bag->pi = pi_new; } - gpr_mu_unlock(&fd->mu); - gpr_mu_unlock(&pollset->mu); + gpr_mu_unlock(&item->mu); + gpr_mu_unlock(&bag->mu); - GRPC_LOG_IF_ERROR("pollset_add_fd", error); + GRPC_LOG_IF_ERROR("add_poll_object", error); + GPR_TIMER_END("add_poll_object", 0); +} - GPR_TIMER_END("pollset_add_fd", 0); +static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + grpc_fd *fd) { + add_poll_object(exec_ctx, &pollset->po, POLL_OBJ_POLLSET, &fd->po, + POLL_OBJ_FD); } /******************************************************************************* @@ -1784,142 +1838,60 @@ retry: */ static grpc_pollset_set *pollset_set_create(void) { - grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set)); - memset(pollset_set, 0, sizeof(*pollset_set)); - gpr_mu_init(&pollset_set->mu); - return pollset_set; + grpc_pollset_set *pss = gpr_malloc(sizeof(*pss)); + gpr_mu_init(&pss->po.mu); + pss->po.pi = NULL; +#ifdef PO_DEBUG + pss->po.obj_type = POLL_OBJ_POLLSET_SET; +#endif + return pss; } -static void pollset_set_destroy(grpc_pollset_set *pollset_set) { - size_t i; - gpr_mu_destroy(&pollset_set->mu); - for (i = 0; i < pollset_set->fd_count; i++) { - GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set"); +static void pollset_set_destroy(grpc_pollset_set *pss) { + gpr_mu_destroy(&pss->po.mu); + + if (pss->po.pi != NULL) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + PI_UNREF(&exec_ctx, pss->po.pi, "pss_destroy"); + grpc_exec_ctx_finish(&exec_ctx); } - gpr_free(pollset_set->pollsets); - gpr_free(pollset_set->pollset_sets); - gpr_free(pollset_set->fds); - gpr_free(pollset_set); + + gpr_free(pss); } -static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd) { - size_t i; - gpr_mu_lock(&pollset_set->mu); - if (pollset_set->fd_count == pollset_set->fd_capacity) { - pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity); - pollset_set->fds = gpr_realloc( - pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds)); - } - GRPC_FD_REF(fd, "pollset_set"); - pollset_set->fds[pollset_set->fd_count++] = fd; - for (i = 0; i < pollset_set->pollset_count; i++) { - pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd); - } - for (i = 0; i < pollset_set->pollset_set_count; i++) { - pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd); - } - gpr_mu_unlock(&pollset_set->mu); +static void pollset_set_add_fd(grpc_exec_ctx 
*exec_ctx, grpc_pollset_set *pss, + grpc_fd *fd) { + add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &fd->po, + POLL_OBJ_FD); } -static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd) { - size_t i; - gpr_mu_lock(&pollset_set->mu); - for (i = 0; i < pollset_set->fd_count; i++) { - if (pollset_set->fds[i] == fd) { - pollset_set->fd_count--; - GPR_SWAP(grpc_fd *, pollset_set->fds[i], - pollset_set->fds[pollset_set->fd_count]); - GRPC_FD_UNREF(fd, "pollset_set"); - break; - } - } - for (i = 0; i < pollset_set->pollset_set_count; i++) { - pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd); - } - gpr_mu_unlock(&pollset_set->mu); +static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, + grpc_fd *fd) { + /* Nothing to do */ } static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, - grpc_pollset *pollset) { - size_t i, j; - gpr_mu_lock(&pollset_set->mu); - if (pollset_set->pollset_count == pollset_set->pollset_capacity) { - pollset_set->pollset_capacity = - GPR_MAX(8, 2 * pollset_set->pollset_capacity); - pollset_set->pollsets = - gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity * - sizeof(*pollset_set->pollsets)); - } - pollset_set->pollsets[pollset_set->pollset_count++] = pollset; - for (i = 0, j = 0; i < pollset_set->fd_count; i++) { - if (fd_is_orphaned(pollset_set->fds[i])) { - GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set"); - } else { - pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]); - pollset_set->fds[j++] = pollset_set->fds[i]; - } - } - pollset_set->fd_count = j; - gpr_mu_unlock(&pollset_set->mu); + grpc_pollset_set *pss, grpc_pollset *ps) { + add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &ps->po, + POLL_OBJ_POLLSET); } static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, - grpc_pollset *pollset) { - size_t i; - gpr_mu_lock(&pollset_set->mu); - for (i = 0; i < pollset_set->pollset_count; i++) { - if (pollset_set->pollsets[i] == pollset) { - pollset_set->pollset_count--; - GPR_SWAP(grpc_pollset *, pollset_set->pollsets[i], - pollset_set->pollsets[pollset_set->pollset_count]); - break; - } - } - gpr_mu_unlock(&pollset_set->mu); + grpc_pollset_set *pss, grpc_pollset *ps) { + /* Nothing to do */ } static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, grpc_pollset_set *bag, grpc_pollset_set *item) { - size_t i, j; - gpr_mu_lock(&bag->mu); - if (bag->pollset_set_count == bag->pollset_set_capacity) { - bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity); - bag->pollset_sets = - gpr_realloc(bag->pollset_sets, - bag->pollset_set_capacity * sizeof(*bag->pollset_sets)); - } - bag->pollset_sets[bag->pollset_set_count++] = item; - for (i = 0, j = 0; i < bag->fd_count; i++) { - if (fd_is_orphaned(bag->fds[i])) { - GRPC_FD_UNREF(bag->fds[i], "pollset_set"); - } else { - pollset_set_add_fd(exec_ctx, item, bag->fds[i]); - bag->fds[j++] = bag->fds[i]; - } - } - bag->fd_count = j; - gpr_mu_unlock(&bag->mu); + add_poll_object(exec_ctx, &bag->po, POLL_OBJ_POLLSET_SET, &item->po, + POLL_OBJ_POLLSET_SET); } static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, grpc_pollset_set *bag, grpc_pollset_set *item) { - size_t i; - gpr_mu_lock(&bag->mu); - for (i = 0; i < bag->pollset_set_count; i++) { - if (bag->pollset_sets[i] == item) { - bag->pollset_set_count--; - GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i], - bag->pollset_sets[bag->pollset_set_count]); - break; - 
} - } - gpr_mu_unlock(&bag->mu); + /* Nothing to do */ } /* Test helper functions @@ -1927,9 +1899,9 @@ static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, void *grpc_fd_get_polling_island(grpc_fd *fd) { polling_island *pi; - gpr_mu_lock(&fd->mu); - pi = fd->polling_island; - gpr_mu_unlock(&fd->mu); + gpr_mu_lock(&fd->po.mu); + pi = fd->po.pi; + gpr_mu_unlock(&fd->po.mu); return pi; } @@ -1937,9 +1909,9 @@ void *grpc_fd_get_polling_island(grpc_fd *fd) { void *grpc_pollset_get_polling_island(grpc_pollset *ps) { polling_island *pi; - gpr_mu_lock(&ps->mu); - pi = ps->polling_island; - gpr_mu_unlock(&ps->mu); + gpr_mu_lock(&ps->po.mu); + pi = ps->po.pi; + gpr_mu_unlock(&ps->po.mu); return pi; } @@ -2000,7 +1972,7 @@ static const grpc_event_engine_vtable vtable = { .workqueue_ref = workqueue_ref, .workqueue_unref = workqueue_unref, - .workqueue_enqueue = workqueue_enqueue, + .workqueue_scheduler = workqueue_scheduler, .shutdown_engine = shutdown_engine, }; diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c index 21b28e5554..5bc5621443 100644 --- a/src/core/lib/iomgr/ev_poll_posix.c +++ b/src/core/lib/iomgr/ev_poll_posix.c @@ -397,7 +397,7 @@ static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { if (!fd->released) { close(fd->fd); } - grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE); } static int fd_wrapped_fd(grpc_fd *fd) { @@ -457,16 +457,14 @@ static grpc_error *fd_shutdown_error(bool shutdown) { static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure **st, grpc_closure *closure) { if (fd->shutdown) { - grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"), - NULL); + grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown")); } else if (*st == CLOSURE_NOT_READY) { /* not ready ==> switch to a waiting state by setting the closure */ *st = closure; } else if (*st == CLOSURE_READY) { /* already ready ==> queue the closure to run immediately */ *st = CLOSURE_NOT_READY; - grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown), - NULL); + grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown)); maybe_wake_one_watcher_locked(fd); } else { /* upcallptr was set to a different closure. This is an error! 
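Most of the remaining hunks in this section repeat one mechanical substitution: the closure, not the call site, now decides where it runs, so the trailing NULL "offload target" argument disappears. As a non-compiling illustration of the pattern only (exec_ctx, closure and error stand for whatever the surrounding function has in scope):

    /* before: every call site chose the (usually NULL) offload target */
    grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);

    /* after: the target was fixed at grpc_closure_init() time, so the
       call site only supplies the error */
    grpc_closure_sched(exec_ctx, closure, error);
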
*/ @@ -489,7 +487,7 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, return 0; } else { /* waiting ==> queue closure */ - grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL); + grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown)); *st = CLOSURE_NOT_READY; return 1; } @@ -852,7 +850,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { GRPC_FD_UNREF(pollset->fds[i], "multipoller"); } pollset->fd_count = 0; - grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE); } static void work_combine_error(grpc_error **composite, grpc_error *error) { @@ -901,7 +899,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (!pollset_has_workers(pollset) && !grpc_closure_list_empty(pollset->idle_jobs)) { GPR_TIMER_MARK("pollset_work.idle_jobs", 0); - grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL); + grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs); goto done; } /* If we're shutting down then we don't execute any extended work */ @@ -1081,7 +1079,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */ gpr_mu_lock(&pollset->mu); } else if (!grpc_closure_list_empty(pollset->idle_jobs)) { - grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL); + grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs); gpr_mu_unlock(&pollset->mu); grpc_exec_ctx_flush(exec_ctx); gpr_mu_lock(&pollset->mu); @@ -1100,7 +1098,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, pollset->shutdown_done = closure; pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); if (!pollset_has_workers(pollset)) { - grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL); + grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs); } if (!pollset->called_shutdown && !pollset_has_workers(pollset)) { pollset->called_shutdown = 1; @@ -1288,10 +1286,8 @@ static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {} #endif -static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, - grpc_workqueue *workqueue, grpc_closure *closure, - grpc_error *error) { - grpc_exec_ctx_sched(exec_ctx, closure, error, NULL); +static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) { + return grpc_schedule_on_exec_ctx; } /******************************************************************************* @@ -1534,7 +1530,7 @@ static const grpc_event_engine_vtable vtable = { .workqueue_ref = workqueue_ref, .workqueue_unref = workqueue_unref, - .workqueue_enqueue = workqueue_enqueue, + .workqueue_scheduler = workqueue_scheduler, .shutdown_engine = shutdown_engine, }; diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c index ab139895fd..2975d619e1 100644 --- a/src/core/lib/iomgr/ev_posix.c +++ b/src/core/lib/iomgr/ev_posix.c @@ -275,9 +275,8 @@ void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) { } #endif -void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, - grpc_closure *closure, grpc_error *error) { - g_event_engine->workqueue_enqueue(exec_ctx, workqueue, closure, error); +grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) { + return g_event_engine->workqueue_scheduler(workqueue); } #endif // GRPC_POSIX_SOCKET diff --git a/src/core/lib/iomgr/ev_posix.h 
b/src/core/lib/iomgr/ev_posix.h index cb5832539d..1068a4bad5 100644 --- a/src/core/lib/iomgr/ev_posix.h +++ b/src/core/lib/iomgr/ev_posix.h @@ -106,8 +106,7 @@ typedef struct grpc_event_engine_vtable { grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue); void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue); #endif - void (*workqueue_enqueue)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, - grpc_closure *closure, grpc_error *error); + grpc_closure_scheduler *(*workqueue_scheduler)(grpc_workqueue *workqueue); } grpc_event_engine_vtable; void grpc_event_engine_init(void); diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c index 604713e578..6aa788f8e5 100644 --- a/src/core/lib/iomgr/exec_ctx.c +++ b/src/core/lib/iomgr/exec_ctx.c @@ -57,7 +57,6 @@ bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) { return true; } -#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) { bool did_something = 0; GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0); @@ -67,8 +66,10 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) { exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL; while (c != NULL) { grpc_closure *next = c->next_data.next; + grpc_error *error = c->error_data.error; did_something = true; - grpc_closure_run(exec_ctx, c, c->error_data.error); + c->cb(exec_ctx, c->cb_arg, error); + GRPC_ERROR_UNREF(error); c = next; } } else if (!grpc_combiner_continue_exec_ctx(exec_ctx)) { @@ -76,30 +77,6 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) { } } GPR_ASSERT(exec_ctx->active_combiner == NULL); - if (exec_ctx->stealing_from_workqueue != NULL) { - if (grpc_exec_ctx_ready_to_finish(exec_ctx)) { - grpc_workqueue_enqueue(exec_ctx, exec_ctx->stealing_from_workqueue, - exec_ctx->stolen_closure, - exec_ctx->stolen_closure->error_data.error); - GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue, - "exec_ctx_sched"); - exec_ctx->stealing_from_workqueue = NULL; - exec_ctx->stolen_closure = NULL; - } else { - grpc_closure *c = exec_ctx->stolen_closure; - GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue, - "exec_ctx_sched"); - exec_ctx->stealing_from_workqueue = NULL; - exec_ctx->stolen_closure = NULL; - grpc_error *error = c->error_data.error; - GPR_TIMER_BEGIN("grpc_exec_ctx_flush.stolen_cb", 0); - c->cb(exec_ctx, c->cb_arg, error); - GRPC_ERROR_UNREF(error); - GPR_TIMER_END("grpc_exec_ctx_flush.stolen_cb", 0); - grpc_exec_ctx_flush(exec_ctx); - did_something = true; - } - } GPR_TIMER_END("grpc_exec_ctx_flush", 0); return did_something; } @@ -109,104 +86,21 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) { grpc_exec_ctx_flush(exec_ctx); } -void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error, - grpc_workqueue *offload_target_or_null) { - GPR_TIMER_BEGIN("grpc_exec_ctx_sched", 0); - if (offload_target_or_null == NULL) { - grpc_closure_list_append(&exec_ctx->closure_list, closure, error); - } else if (exec_ctx->stealing_from_workqueue == NULL) { - exec_ctx->stealing_from_workqueue = offload_target_or_null; - closure->error_data.error = error; - exec_ctx->stolen_closure = closure; - } else if (exec_ctx->stealing_from_workqueue != offload_target_or_null) { - grpc_workqueue_enqueue(exec_ctx, offload_target_or_null, closure, error); - GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched"); - } else { /* stealing_from_workqueue == offload_target_or_null */ - grpc_workqueue_enqueue(exec_ctx, 
offload_target_or_null, - exec_ctx->stolen_closure, - exec_ctx->stolen_closure->error_data.error); - closure->error_data.error = error; - exec_ctx->stolen_closure = closure; - GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched"); - } - GPR_TIMER_END("grpc_exec_ctx_sched", 0); +static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure, + grpc_error *error) { + closure->cb(exec_ctx, closure->cb_arg, error); + GRPC_ERROR_UNREF(error); } -void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx, - grpc_closure_list *list, - grpc_workqueue *offload_target_or_null) { - grpc_closure_list_move(list, &exec_ctx->closure_list); +static void exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, + grpc_error *error) { + grpc_closure_list_append(&exec_ctx->closure_list, closure, error); } void grpc_exec_ctx_global_init(void) {} void grpc_exec_ctx_global_shutdown(void) {} -#else -static gpr_mu g_mu; -static gpr_cv g_cv; -static int g_threads = 0; - -static void run_closure(void *arg) { - grpc_closure *closure = arg; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - closure->cb(&exec_ctx, closure->cb_arg, (closure->final_data & 1) != 0); - grpc_exec_ctx_finish(&exec_ctx); - gpr_mu_lock(&g_mu); - if (--g_threads == 0) { - gpr_cv_signal(&g_cv); - } - gpr_mu_unlock(&g_mu); -} - -static void start_closure(grpc_closure *closure) { - gpr_thd_id id; - gpr_mu_lock(&g_mu); - g_threads++; - gpr_mu_unlock(&g_mu); - gpr_thd_new(&id, run_closure, closure, NULL); -} - -bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) { return false; } - -void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {} -void grpc_exec_ctx_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - bool success, - grpc_workqueue *offload_target_or_null) { - GPR_ASSERT(offload_target_or_null == NULL); - if (closure == NULL) return; - closure->final_data = success; - start_closure(closure); -} - -void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx, - grpc_closure_list *list, - grpc_workqueue *offload_target_or_null) { - GPR_ASSERT(offload_target_or_null == NULL); - if (list == NULL) return; - grpc_closure *p = list->head; - while (p) { - grpc_closure *start = p; - p = grpc_closure_next(start); - start_closure(start); - } - grpc_closure_list r = GRPC_CLOSURE_LIST_INIT; - *list = r; -} - -void grpc_exec_ctx_global_init(void) { - gpr_mu_init(&g_mu); - gpr_cv_init(&g_cv); -} - -void grpc_exec_ctx_global_shutdown(void) { - gpr_mu_lock(&g_mu); - while (g_threads != 0) { - gpr_cv_wait(&g_cv, &g_mu, gpr_inf_future(GPR_CLOCK_REALTIME)); - } - gpr_mu_unlock(&g_mu); - - gpr_mu_destroy(&g_mu); - gpr_cv_destroy(&g_cv); -} -#endif +static const grpc_closure_scheduler_vtable exec_ctx_scheduler_vtable = { + exec_ctx_run, exec_ctx_sched}; +static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable}; +grpc_closure_scheduler *grpc_schedule_on_exec_ctx = &exec_ctx_scheduler; diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h index 7e50cb9825..e566f1b3e8 100644 --- a/src/core/lib/iomgr/exec_ctx.h +++ b/src/core/lib/iomgr/exec_ctx.h @@ -66,17 +66,6 @@ typedef struct grpc_combiner grpc_combiner; #ifndef GRPC_EXECUTION_CONTEXT_SANITIZER struct grpc_exec_ctx { grpc_closure_list closure_list; - /** The workqueue we're stealing work from. 
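The exec_ctx_run/exec_ctx_sched pair and the exec_ctx_scheduler_vtable just above are the whole trick behind this series: a closure carries a pointer to a scheduler, and a scheduler is nothing but a vtable with run and sched entry points. Below is a small standalone model of that shape, not gRPC's actual grpc_closure/grpc_closure_scheduler types; it compiles on its own and shows why grpc_closure_sched no longer needs an offload argument:

    #include <stdio.h>

    typedef struct closure closure;
    typedef struct scheduler scheduler;

    typedef struct scheduler_vtable {
      void (*run)(closure *c, int error);   /* run inline, now           */
      void (*sched)(closure *c, int error); /* queue for later execution */
    } scheduler_vtable;

    struct scheduler {
      const scheduler_vtable *vtable;
    };

    struct closure {
      void (*cb)(void *arg, int error);
      void *cb_arg;
      scheduler *scheduler; /* decided once, at init time */
    };

    static void closure_init(closure *c, void (*cb)(void *, int), void *arg,
                             scheduler *s) {
      c->cb = cb;
      c->cb_arg = arg;
      c->scheduler = s;
    }

    /* The call sites rewritten throughout this diff reduce to this: no
       offload-target parameter, just a dispatch through the closure's own
       scheduler. */
    static void closure_sched(closure *c, int error) {
      c->scheduler->vtable->sched(c, error);
    }

    /* One concrete scheduler that simply runs the callback immediately,
       standing in for "append to the exec_ctx closure list". */
    static void run_now(closure *c, int error) { c->cb(c->cb_arg, error); }

    static const scheduler_vtable immediate_vtable = {run_now, run_now};
    static scheduler schedule_immediately = {&immediate_vtable};

    static void say_hello(void *arg, int error) {
      printf("%s (error=%d)\n", (const char *)arg, error);
    }

    int main(void) {
      closure c;
      closure_init(&c, say_hello, (void *)"ran via its scheduler",
                   &schedule_immediately);
      closure_sched(&c, 0);
      return 0;
    }

The executor and combiner changes later in the diff are just other vtables plugged into the same slot.
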
- As items are queued to the execution context, we try to steal one - workqueue item and execute it inline (assuming the exec_ctx is not - finished) - doing so does not invalidate the workqueue's contract, and - provides a small latency win in cases where we get a hit */ - grpc_workqueue *stealing_from_workqueue; - /** The workqueue item that was stolen from the workqueue above. When new - items are scheduled to be offloaded to that workqueue, we need to update - this like a 1-deep fifo to maintain the invariant that workqueue items - queued by one thread are started in order */ - grpc_closure *stolen_closure; /** currently active combiner: updated only via combiner.c */ grpc_combiner *active_combiner; /** last active combiner in the active combiner list */ @@ -89,10 +78,7 @@ struct grpc_exec_ctx { /* initializer for grpc_exec_ctx: prefer to use GRPC_EXEC_CTX_INIT whenever possible */ #define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \ - { \ - GRPC_CLOSURE_LIST_INIT, NULL, NULL, NULL, NULL, false, finish_check_arg, \ - finish_check \ - } + { GRPC_CLOSURE_LIST_INIT, NULL, NULL, false, finish_check_arg, finish_check } #else struct grpc_exec_ctx { bool cached_ready_to_finish; @@ -108,6 +94,8 @@ struct grpc_exec_ctx { #define GRPC_EXEC_CTX_INIT \ GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL) +extern grpc_closure_scheduler *grpc_schedule_on_exec_ctx; + /** Flush any work that has been enqueued onto this grpc_exec_ctx. * Caller must guarantee that no interfering locks are held. * Returns true if work was performed, false otherwise. */ @@ -115,14 +103,6 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx); /** Finish any pending work for a grpc_exec_ctx. Must be called before * the instance is destroyed, or work may be lost. */ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx); -/** Add a closure to be executed in the future. - If \a offload_target_or_null is NULL, the closure will be executed at the - next exec_ctx.{finish,flush} point. - If \a offload_target_or_null is non-NULL, the closure will be scheduled - against the workqueue, and a reference to the workqueue will be consumed. */ -void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error, - grpc_workqueue *offload_target_or_null); /** Returns true if we'd like to leave this execution context as soon as possible: useful for deciding whether to do something more or not depending on outside context */ @@ -131,11 +111,6 @@ bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx); bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored); /** A finish check that is always ready to finish */ bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored); -/** Add a list of closures to be executed at the next flush/finish point. - * Leaves \a list empty. 
*/ -void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx, - grpc_closure_list *list, - grpc_workqueue *offload_target_or_null); void grpc_exec_ctx_global_init(void); diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c index 8d7535d6fe..1342a28d8d 100644 --- a/src/core/lib/iomgr/executor.c +++ b/src/core/lib/iomgr/executor.c @@ -77,10 +77,18 @@ static void closure_exec_thread_func(void *ignored) { gpr_mu_unlock(&g_executor.mu); break; } else { - grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL); + grpc_closure *c = g_executor.closures.head; + grpc_closure_list_init(&g_executor.closures); + gpr_mu_unlock(&g_executor.mu); + while (c != NULL) { + grpc_closure *next = c->next_data.next; + grpc_error *error = c->error_data.error; + c->cb(&exec_ctx, c->cb_arg, error); + GRPC_ERROR_UNREF(error); + c = next; + } + grpc_exec_ctx_flush(&exec_ctx); } - gpr_mu_unlock(&g_executor.mu); - grpc_exec_ctx_flush(&exec_ctx); } grpc_exec_ctx_finish(&exec_ctx); } @@ -112,7 +120,8 @@ static void maybe_spawn_locked() { g_executor.pending_join = 1; } -void grpc_executor_push(grpc_closure *closure, grpc_error *error) { +static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, + grpc_error *error) { gpr_mu_lock(&g_executor.mu); if (g_executor.shutting_down == 0) { grpc_closure_list_append(&g_executor.closures, closure, error); @@ -133,7 +142,15 @@ void grpc_executor_shutdown() { * list below because we aren't accepting new work */ /* Execute pending callbacks, some may be performing cleanups */ - grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL); + grpc_closure *c = g_executor.closures.head; + grpc_closure_list_init(&g_executor.closures); + while (c != NULL) { + grpc_closure *next = c->next_data.next; + grpc_error *error = c->error_data.error; + c->cb(&exec_ctx, c->cb_arg, error); + GRPC_ERROR_UNREF(error); + c = next; + } grpc_exec_ctx_finish(&exec_ctx); GPR_ASSERT(grpc_closure_list_empty(g_executor.closures)); if (pending_join) { @@ -141,3 +158,8 @@ void grpc_executor_shutdown() { } gpr_mu_destroy(&g_executor.mu); } + +static const grpc_closure_scheduler_vtable executor_vtable = {executor_push, + executor_push}; +static grpc_closure_scheduler executor_scheduler = {&executor_vtable}; +grpc_closure_scheduler *grpc_executor_scheduler = &executor_scheduler; diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h index da9dcd07d0..53f3b6d441 100644 --- a/src/core/lib/iomgr/executor.h +++ b/src/core/lib/iomgr/executor.h @@ -43,9 +43,7 @@ * non-blocking solution available. 
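Note that executor_push fills both the run and the sched slot of executor_vtable: "running" a closure on the executor always means queueing it for the executor thread, so the two entry points coincide. For callers, the old grpc_executor_push pattern turns into ordinary closure scheduling; roughly, and only as a sketch (do_work and state are made-up placeholders):

    /* previously: grpc_executor_push(&state->closure, GRPC_ERROR_NONE); */
    grpc_closure_init(&state->closure, do_work, state, grpc_executor_scheduler);
    grpc_closure_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE);
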
*/ void grpc_executor_init(); -/** Enqueue \a closure for its eventual execution of \a f(arg) on a separate - * thread */ -void grpc_executor_push(grpc_closure *closure, grpc_error *error); +extern grpc_closure_scheduler *grpc_executor_scheduler; /** Shutdown the executor, running all pending work as part of the call */ void grpc_executor_shutdown(); diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c index 3a74b842b6..ed3edeee94 100644 --- a/src/core/lib/iomgr/pollset_uv.c +++ b/src/core/lib/iomgr/pollset_uv.c @@ -83,7 +83,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, // Drain any pending UV callbacks without blocking uv_run(uv_default_loop(), UV_RUN_NOWAIT); } - grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE); } void grpc_pollset_destroy(grpc_pollset *pollset) { diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c index 5540303e49..2a45e708df 100644 --- a/src/core/lib/iomgr/pollset_windows.c +++ b/src/core/lib/iomgr/pollset_windows.c @@ -109,7 +109,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, pollset->shutting_down = 1; grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); if (!pollset->is_iocp_worker) { - grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE); } else { pollset->on_shutdown = closure; } @@ -167,8 +167,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } if (pollset->shutting_down && pollset->on_shutdown != NULL) { - grpc_exec_ctx_sched(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE, - NULL); + grpc_closure_sched(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE); pollset->on_shutdown = NULL; } goto done; diff --git a/src/core/lib/iomgr/resolve_address.h b/src/core/lib/iomgr/resolve_address.h index 275924448a..e03d16fa4e 100644 --- a/src/core/lib/iomgr/resolve_address.h +++ b/src/core/lib/iomgr/resolve_address.h @@ -36,6 +36,7 @@ #include <stddef.h> #include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/iomgr/pollset_set.h" #define GRPC_MAX_SOCKADDR_SIZE 128 @@ -54,6 +55,7 @@ typedef struct { /* TODO(ctiller): add a timeout here */ extern void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *addr, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addresses); /* Destroy resolved addresses */ diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c index de791b2b67..50e470d149 100644 --- a/src/core/lib/iomgr/resolve_address_posix.c +++ b/src/core/lib/iomgr/resolve_address_posix.c @@ -163,10 +163,9 @@ typedef struct { static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp, grpc_error *error) { request *r = rp; - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, r->on_done, - grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out), - NULL); + grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out)); gpr_free(r->name); gpr_free(r->default_port); gpr_free(r); @@ -181,20 +180,22 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) { static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addrs) { request *r = gpr_malloc(sizeof(request)); - 
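grpc_resolve_address grows a grpc_pollset_set *interested_parties parameter, which this thread-offloading POSIX implementation merely accepts but polling resolvers can use, and the DNS request closure is now bound to grpc_executor_scheduler instead of going through grpc_executor_push. A hedged sketch of what a caller passes after this change, with the request struct r and its fields being placeholders rather than anything defined here:

    grpc_resolve_address(exec_ctx, r->name, r->default_port,
                         r->interested_parties, &r->on_resolved, &r->addresses);
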
grpc_closure_init(&r->request_closure, do_request_thread, r); + grpc_closure_init(&r->request_closure, do_request_thread, r, + grpc_executor_scheduler); r->name = gpr_strdup(name); r->default_port = gpr_strdup(default_port); r->on_done = on_done; r->addrs_out = addrs; - grpc_executor_push(&r->request_closure, GRPC_ERROR_NONE); + grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE); } -void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *name, - const char *default_port, grpc_closure *on_done, - grpc_resolved_addresses **addrs) = - resolve_address_impl; +void (*grpc_resolve_address)( + grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, + grpc_resolved_addresses **addrs) = resolve_address_impl; #endif diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c index b8295acfa1..9b5f3209f0 100644 --- a/src/core/lib/iomgr/resolve_address_uv.c +++ b/src/core/lib/iomgr/resolve_address_uv.c @@ -98,7 +98,7 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status, grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_error *error; error = handle_addrinfo_result(status, res, r->addresses); - grpc_exec_ctx_sched(&exec_ctx, r->on_done, error, NULL); + grpc_closure_sched(&exec_ctx, r->on_done, error); grpc_exec_ctx_finish(&exec_ctx); gpr_free(r->hints); @@ -181,6 +181,7 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) { static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addrs) { uv_getaddrinfo_t *req; @@ -192,7 +193,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, int s; err = try_split_host_port(name, default_port, &host, &port); if (err != GRPC_ERROR_NONE) { - grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL); + grpc_closure_sched(exec_ctx, on_done, err); return; } r = gpr_malloc(sizeof(request)); @@ -216,16 +217,16 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, *addrs = NULL; err = GRPC_ERROR_CREATE("getaddrinfo failed"); err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR, uv_strerror(s)); - grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL); + grpc_closure_sched(exec_ctx, on_done, err); gpr_free(r); gpr_free(req); gpr_free(hints); } } -void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *name, - const char *default_port, grpc_closure *on_done, - grpc_resolved_addresses **addrs) = - resolve_address_impl; +void (*grpc_resolve_address)( + grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, + grpc_resolved_addresses **addrs) = resolve_address_impl; #endif /* GRPC_UV */ diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c index e139293c03..2439ce3cb7 100644 --- a/src/core/lib/iomgr/resolve_address_windows.c +++ b/src/core/lib/iomgr/resolve_address_windows.c @@ -154,7 +154,7 @@ static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp, } else { GRPC_ERROR_REF(error); } - grpc_exec_ctx_sched(exec_ctx, r->on_done, error, NULL); + grpc_closure_sched(exec_ctx, r->on_done, error); gpr_free(r->name); gpr_free(r->default_port); gpr_free(r); @@ -169,20 +169,22 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) { static void resolve_address_impl(grpc_exec_ctx *exec_ctx, 
const char *name, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addresses) { request *r = gpr_malloc(sizeof(request)); - grpc_closure_init(&r->request_closure, do_request_thread, r); + grpc_closure_init(&r->request_closure, do_request_thread, r, + grpc_executor_scheduler); r->name = gpr_strdup(name); r->default_port = gpr_strdup(default_port); r->on_done = on_done; r->addresses = addresses; - grpc_executor_push(&r->request_closure, GRPC_ERROR_NONE); + grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE); } -void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *name, - const char *default_port, grpc_closure *on_done, - grpc_resolved_addresses **addresses) = - resolve_address_impl; +void (*grpc_resolve_address)( + grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, + grpc_resolved_addresses **addresses) = resolve_address_impl; #endif diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c index 213d29600c..8db539edfb 100644 --- a/src/core/lib/iomgr/resource_quota.c +++ b/src/core/lib/iomgr/resource_quota.c @@ -265,9 +265,8 @@ static void rq_step_sched(grpc_exec_ctx *exec_ctx, if (resource_quota->step_scheduled) return; resource_quota->step_scheduled = true; grpc_resource_quota_internal_ref(resource_quota); - grpc_combiner_execute_finally(exec_ctx, resource_quota->combiner, - &resource_quota->rq_step_closure, - GRPC_ERROR_NONE, false); + grpc_closure_sched(exec_ctx, &resource_quota->rq_step_closure, + GRPC_ERROR_NONE); } /* returns true if all allocations are completed */ @@ -294,7 +293,7 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx, } if (resource_user->free_pool >= 0) { resource_user->allocating = false; - grpc_exec_ctx_enqueue_list(exec_ctx, &resource_user->on_allocated, NULL); + grpc_closure_list_sched(exec_ctx, &resource_user->on_allocated); gpr_mu_unlock(&resource_user->mu); } else { rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION); @@ -439,7 +438,7 @@ static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx, resource_user->new_reclaimers[destructive] = NULL; GPR_ASSERT(resource_user->reclaimers[destructive] == NULL); if (gpr_atm_acq_load(&resource_user->shutdown) > 0) { - grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED, NULL); + grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED); return false; } resource_user->reclaimers[destructive] = closure; @@ -480,10 +479,10 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru, static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) { grpc_resource_user *resource_user = ru; - grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0], - GRPC_ERROR_CANCELLED, NULL); - grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1], - GRPC_ERROR_CANCELLED, NULL); + grpc_closure_sched(exec_ctx, resource_user->reclaimers[0], + GRPC_ERROR_CANCELLED); + grpc_closure_sched(exec_ctx, resource_user->reclaimers[1], + GRPC_ERROR_CANCELLED); resource_user->reclaimers[0] = NULL; resource_user->reclaimers[1] = NULL; rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_BENIGN); @@ -496,10 +495,10 @@ static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) { for (int i = 0; i < GRPC_RULIST_COUNT; i++) { rulist_remove(resource_user, (grpc_rulist)i); } - grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0], - GRPC_ERROR_CANCELLED, NULL); - 
grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1], - GRPC_ERROR_CANCELLED, NULL); + grpc_closure_sched(exec_ctx, resource_user->reclaimers[0], + GRPC_ERROR_CANCELLED); + grpc_closure_sched(exec_ctx, resource_user->reclaimers[1], + GRPC_ERROR_CANCELLED); if (resource_user->free_pool != 0) { resource_user->resource_quota->free_pool += resource_user->free_pool; rq_step_sched(exec_ctx, resource_user->resource_quota); @@ -571,9 +570,12 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) { gpr_asprintf(&resource_quota->name, "anonymous_pool_%" PRIxPTR, (intptr_t)resource_quota); } - grpc_closure_init(&resource_quota->rq_step_closure, rq_step, resource_quota); + grpc_closure_init( + &resource_quota->rq_step_closure, rq_step, resource_quota, + grpc_combiner_finally_scheduler(resource_quota->combiner, true)); grpc_closure_init(&resource_quota->rq_reclamation_done_closure, - rq_reclamation_done, resource_quota); + rq_reclamation_done, resource_quota, + grpc_combiner_scheduler(resource_quota->combiner, false)); for (int i = 0; i < GRPC_RULIST_COUNT; i++) { resource_quota->roots[i] = NULL; } @@ -614,9 +616,8 @@ void grpc_resource_quota_resize(grpc_resource_quota *resource_quota, rq_resize_args *a = gpr_malloc(sizeof(*a)); a->resource_quota = grpc_resource_quota_internal_ref(resource_quota); a->size = (int64_t)size; - grpc_closure_init(&a->closure, rq_resize, a); - grpc_combiner_execute(&exec_ctx, resource_quota->combiner, &a->closure, - GRPC_ERROR_NONE, false); + grpc_closure_init(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx); + grpc_closure_sched(&exec_ctx, &a->closure, GRPC_ERROR_NONE); grpc_exec_ctx_finish(&exec_ctx); } @@ -663,15 +664,19 @@ grpc_resource_user *grpc_resource_user_create( resource_user->resource_quota = grpc_resource_quota_internal_ref(resource_quota); grpc_closure_init(&resource_user->allocate_closure, &ru_allocate, - resource_user); + resource_user, + grpc_combiner_scheduler(resource_quota->combiner, false)); grpc_closure_init(&resource_user->add_to_free_pool_closure, - &ru_add_to_free_pool, resource_user); + &ru_add_to_free_pool, resource_user, + grpc_combiner_scheduler(resource_quota->combiner, false)); grpc_closure_init(&resource_user->post_reclaimer_closure[0], - &ru_post_benign_reclaimer, resource_user); + &ru_post_benign_reclaimer, resource_user, + grpc_combiner_scheduler(resource_quota->combiner, false)); grpc_closure_init(&resource_user->post_reclaimer_closure[1], - &ru_post_destructive_reclaimer, resource_user); - grpc_closure_init(&resource_user->destroy_closure, &ru_destroy, - resource_user); + &ru_post_destructive_reclaimer, resource_user, + grpc_combiner_scheduler(resource_quota->combiner, false)); + grpc_closure_init(&resource_user->destroy_closure, &ru_destroy, resource_user, + grpc_combiner_scheduler(resource_quota->combiner, false)); gpr_mu_init(&resource_user->mu); gpr_atm_rel_store(&resource_user->refs, 1); gpr_atm_rel_store(&resource_user->shutdown, 0); @@ -706,9 +711,8 @@ static void ru_unref_by(grpc_exec_ctx *exec_ctx, gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount); GPR_ASSERT(old >= amount); if (old == amount) { - grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner, - &resource_user->destroy_closure, GRPC_ERROR_NONE, - false); + grpc_closure_sched(exec_ctx, &resource_user->destroy_closure, + GRPC_ERROR_NONE); } } @@ -724,9 +728,12 @@ void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx, void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx, grpc_resource_user *resource_user) { if 
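resource_quota.c shows the same migration applied to combiners: instead of calling grpc_combiner_execute(exec_ctx, combiner, closure, error, false) at every site, each closure is bound once to grpc_combiner_scheduler(combiner, covered_by_poller) (or grpc_combiner_finally_scheduler for the step closure) and thereafter scheduled like any other closure. Schematically, with ru standing in for a resource user as in the surrounding hunks:

    grpc_closure_init(&ru->allocate_closure, ru_allocate, ru,
                      grpc_combiner_scheduler(ru->resource_quota->combiner,
                                              false));
    /* later, on the hot path: */
    grpc_closure_sched(exec_ctx, &ru->allocate_closure, GRPC_ERROR_NONE);
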
(gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) { - grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner, - grpc_closure_create(ru_shutdown, resource_user), - GRPC_ERROR_NONE, false); + grpc_closure_sched(exec_ctx, + grpc_closure_create( + ru_shutdown, resource_user, + grpc_combiner_scheduler( + resource_user->resource_quota->combiner, false)), + GRPC_ERROR_NONE); } } @@ -746,12 +753,11 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx, GRPC_ERROR_NONE); if (!resource_user->allocating) { resource_user->allocating = true; - grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner, - &resource_user->allocate_closure, GRPC_ERROR_NONE, - false); + grpc_closure_sched(exec_ctx, &resource_user->allocate_closure, + GRPC_ERROR_NONE); } } else { - grpc_exec_ctx_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE); } gpr_mu_unlock(&resource_user->mu); } @@ -770,9 +776,8 @@ void grpc_resource_user_free(grpc_exec_ctx *exec_ctx, if (is_bigger_than_zero && was_zero_or_negative && !resource_user->added_to_free_pool) { resource_user->added_to_free_pool = true; - grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner, - &resource_user->add_to_free_pool_closure, - GRPC_ERROR_NONE, false); + grpc_closure_sched(exec_ctx, &resource_user->add_to_free_pool_closure, + GRPC_ERROR_NONE); } gpr_mu_unlock(&resource_user->mu); ru_unref_by(exec_ctx, resource_user, (gpr_atm)size); @@ -784,9 +789,9 @@ void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx, grpc_closure *closure) { GPR_ASSERT(resource_user->new_reclaimers[destructive] == NULL); resource_user->new_reclaimers[destructive] = closure; - grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner, - &resource_user->post_reclaimer_closure[destructive], - GRPC_ERROR_NONE, false); + grpc_closure_sched(exec_ctx, + &resource_user->post_reclaimer_closure[destructive], + GRPC_ERROR_NONE); } void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx, @@ -795,18 +800,20 @@ void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx, gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete", resource_user->resource_quota->name, resource_user->name); } - grpc_combiner_execute( - exec_ctx, resource_user->resource_quota->combiner, - &resource_user->resource_quota->rq_reclamation_done_closure, - GRPC_ERROR_NONE, false); + grpc_closure_sched( + exec_ctx, &resource_user->resource_quota->rq_reclamation_done_closure, + GRPC_ERROR_NONE); } void grpc_resource_user_slice_allocator_init( grpc_resource_user_slice_allocator *slice_allocator, grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p) { - grpc_closure_init(&slice_allocator->on_allocated, ru_allocated_slices, - slice_allocator); - grpc_closure_init(&slice_allocator->on_done, cb, p); + grpc_closure_init( + &slice_allocator->on_allocated, ru_allocated_slices, slice_allocator, + grpc_combiner_scheduler(resource_user->resource_quota->combiner, false)); + grpc_closure_init( + &slice_allocator->on_done, cb, p, + grpc_combiner_scheduler(resource_user->resource_quota->combiner, false)); slice_allocator->resource_user = resource_user; } diff --git a/src/core/lib/iomgr/socket_windows.c b/src/core/lib/iomgr/socket_windows.c index 54911e0e31..2f2e02f715 100644 --- a/src/core/lib/iomgr/socket_windows.c +++ b/src/core/lib/iomgr/socket_windows.c @@ -131,7 +131,7 @@ static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx, gpr_mu_lock(&socket->state_mu); if 
(info->has_pending_iocp) { info->has_pending_iocp = 0; - grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE); } else { info->closure = closure; } @@ -154,7 +154,7 @@ void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket, GPR_ASSERT(!info->has_pending_iocp); gpr_mu_lock(&socket->state_mu); if (info->closure) { - grpc_exec_ctx_sched(exec_ctx, info->closure, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, info->closure, GRPC_ERROR_NONE); info->closure = NULL; } else { info->has_pending_iocp = 1; diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c index a3a70a8ed7..d089d2bc3b 100644 --- a/src/core/lib/iomgr/tcp_client_posix.c +++ b/src/core/lib/iomgr/tcp_client_posix.c @@ -265,7 +265,7 @@ finish: grpc_channel_args_destroy(ac->channel_args); gpr_free(ac); } - grpc_exec_ctx_sched(exec_ctx, closure, error, NULL); + grpc_closure_sched(exec_ctx, closure, error); } static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, @@ -294,7 +294,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd); if (error != GRPC_ERROR_NONE) { - grpc_exec_ctx_sched(exec_ctx, closure, error, NULL); + grpc_closure_sched(exec_ctx, closure, error); return; } if (dsmode == GRPC_DSMODE_IPV4) { @@ -303,7 +303,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, addr = &addr4_copy; } if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) { - grpc_exec_ctx_sched(exec_ctx, closure, error, NULL); + grpc_closure_sched(exec_ctx, closure, error); return; } @@ -321,14 +321,13 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, if (err >= 0) { *ep = grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str); - grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE); goto done; } if (errno != EWOULDBLOCK && errno != EINPROGRESS) { grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, "tcp_client_connect_error"); - grpc_exec_ctx_sched(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"), - NULL); + grpc_closure_sched(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect")); goto done; } @@ -343,8 +342,8 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, addr_str = NULL; gpr_mu_init(&ac->mu); ac->refs = 2; - ac->write_closure.cb = on_writable; - ac->write_closure.cb_arg = ac; + grpc_closure_init(&ac->write_closure, on_writable, ac, + grpc_schedule_on_exec_ctx); ac->channel_args = grpc_channel_args_copy(channel_args); if (grpc_tcp_trace) { diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c index b07f9ceffa..b1664b85fd 100644 --- a/src/core/lib/iomgr/tcp_client_uv.c +++ b/src/core/lib/iomgr/tcp_client_uv.c @@ -110,7 +110,7 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) { if (done) { uv_tcp_connect_cleanup(&exec_ctx, connect); } - grpc_exec_ctx_sched(&exec_ctx, closure, error, NULL); + grpc_closure_sched(&exec_ctx, closure, error); grpc_exec_ctx_finish(&exec_ctx); } diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.c index 1127588ebc..692252bbe0 100644 --- a/src/core/lib/iomgr/tcp_client_windows.c +++ b/src/core/lib/iomgr/tcp_client_windows.c @@ -129,7 +129,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { async_connect_unlock_and_cleanup(exec_ctx, ac, socket); /* If the connection 
was aborted, the callback was already called when the deadline was met. */ - grpc_exec_ctx_sched(exec_ctx, on_done, error, NULL); + grpc_closure_sched(exec_ctx, on_done, error); } /* Tries to issue one async connection, then schedules both an IOCP @@ -227,7 +227,7 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done, ac->addr_name = grpc_sockaddr_to_uri(addr); ac->endpoint = endpoint; ac->resource_quota = resource_quota; - grpc_closure_init(&ac->on_connect, on_connect, ac); + grpc_closure_init(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx); grpc_timer_init(exec_ctx, &ac->alarm, deadline, on_alarm, ac, gpr_now(GPR_CLOCK_MONOTONIC)); @@ -247,7 +247,7 @@ failure: closesocket(sock); } grpc_resource_quota_internal_unref(exec_ctx, resource_quota); - grpc_exec_ctx_sched(exec_ctx, on_done, final_error, NULL); + grpc_closure_sched(exec_ctx, on_done, final_error); } #endif /* GRPC_WINSOCK_SOCKET */ diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c index 12a4797e6f..21a0371d10 100644 --- a/src/core/lib/iomgr/tcp_posix.c +++ b/src/core/lib/iomgr/tcp_posix.c @@ -107,6 +107,12 @@ typedef struct { grpc_resource_user_slice_allocator slice_allocator; } grpc_tcp; +static grpc_error *tcp_annotate_error(grpc_error *src_error, grpc_tcp *tcp) { + return grpc_error_set_str( + grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd), + GRPC_ERROR_STR_TARGET_ADDRESS, tcp->peer_string); +} + static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, grpc_error *error); static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, @@ -230,13 +236,15 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure); } else { grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer); - call_read_cb(exec_ctx, tcp, GRPC_OS_ERROR(errno, "recvmsg")); + call_read_cb(exec_ctx, tcp, + tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp)); TCP_UNREF(exec_ctx, tcp, "read"); } } else if (read_bytes == 0) { /* 0 read size ==> end of stream */ grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer); - call_read_cb(exec_ctx, tcp, GRPC_ERROR_CREATE("Socket closed")); + call_read_cb(exec_ctx, tcp, + tcp_annotate_error(GRPC_ERROR_CREATE("Socket closed"), tcp)); TCP_UNREF(exec_ctx, tcp, "read"); } else { GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length); @@ -308,7 +316,7 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, tcp->finished_edge = false; grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure); } else { - grpc_exec_ctx_sched(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE); } } @@ -365,8 +373,13 @@ static bool tcp_flush(grpc_tcp *tcp, grpc_error **error) { tcp->outgoing_slice_idx = unwind_slice_idx; tcp->outgoing_byte_idx = unwind_byte_idx; return false; + } else if (errno == EPIPE) { + *error = grpc_error_set_int(GRPC_OS_ERROR(errno, "sendmsg"), + GRPC_ERROR_INT_GRPC_STATUS, + GRPC_STATUS_UNAVAILABLE); + return true; } else { - *error = GRPC_OS_ERROR(errno, "sendmsg"); + *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp); return true; } } @@ -447,10 +460,10 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, if (buf->length == 0) { GPR_TIMER_END("tcp_write", 0); - grpc_exec_ctx_sched(exec_ctx, cb, grpc_fd_is_shutdown(tcp->em_fd) - ? 
GRPC_ERROR_CREATE("EOF") - : GRPC_ERROR_NONE, - NULL); + grpc_closure_sched(exec_ctx, cb, + grpc_fd_is_shutdown(tcp->em_fd) + ? tcp_annotate_error(GRPC_ERROR_CREATE("EOF"), tcp) + : GRPC_ERROR_NONE); return; } tcp->outgoing_buffer = buf; @@ -470,7 +483,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, gpr_log(GPR_DEBUG, "write: %s", str); grpc_error_free_string(str); } - grpc_exec_ctx_sched(exec_ctx, cb, error, NULL); + grpc_closure_sched(exec_ctx, cb, error); } GPR_TIMER_END("tcp_write", 0); @@ -538,10 +551,10 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, gpr_ref_init(&tcp->refcount, 1); gpr_atm_no_barrier_store(&tcp->shutdown_count, 0); tcp->em_fd = em_fd; - tcp->read_closure.cb = tcp_handle_read; - tcp->read_closure.cb_arg = tcp; - tcp->write_closure.cb = tcp_handle_write; - tcp->write_closure.cb_arg = tcp; + grpc_closure_init(&tcp->read_closure, tcp_handle_read, tcp, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&tcp->write_closure, tcp_handle_write, tcp, + grpc_schedule_on_exec_ctx); grpc_slice_buffer_init(&tcp->last_read_buffer); tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string); grpc_resource_user_slice_allocator_init( diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h index 6eba8c4057..437a94beff 100644 --- a/src/core/lib/iomgr/tcp_server.h +++ b/src/core/lib/iomgr/tcp_server.h @@ -52,7 +52,8 @@ typedef struct grpc_tcp_server_acceptor { unsigned fd_index; } grpc_tcp_server_acceptor; -/* Called for newly connected TCP connections. */ +/* Called for newly connected TCP connections. + Takes ownership of acceptor. */ typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *ep, grpc_pollset *accepting_pollset, diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c index 7e2fb0f1f9..6db624dd56 100644 --- a/src/core/lib/iomgr/tcp_server_posix.c +++ b/src/core/lib/iomgr/tcp_server_posix.c @@ -208,7 +208,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { GPR_ASSERT(s->shutdown); gpr_mu_unlock(&s->mu); if (s->shutdown_complete != NULL) { - grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE); } gpr_mu_destroy(&s->mu); @@ -254,8 +254,8 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { grpc_tcp_listener *sp; for (sp = s->head; sp; sp = sp->next) { grpc_unlink_if_unix_domain_socket(&sp->addr); - sp->destroyed_closure.cb = destroyed_port; - sp->destroyed_closure.cb_arg = s; + grpc_closure_init(&sp->destroyed_closure, destroyed_port, s, + grpc_schedule_on_exec_ctx); grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL, "tcp_listener_shutdown"); } @@ -381,16 +381,12 @@ error: /* event manager callback when reads are ready */ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { grpc_tcp_listener *sp = arg; - grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index, - sp->fd_index}; - grpc_pollset *read_notifier_pollset = NULL; - grpc_fd *fdobj; if (err != GRPC_ERROR_NONE) { goto error; } - read_notifier_pollset = + grpc_pollset *read_notifier_pollset = sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add( &sp->server->next_pollset_to_assign, 1) % sp->server->pollset_count]; @@ -426,7 +422,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str); } - fdobj = 
grpc_fd_create(fd, name); + grpc_fd *fdobj = grpc_fd_create(fd, name); if (read_notifier_pollset == NULL) { gpr_log(GPR_ERROR, "Read notifier pollset is not set on the fd"); @@ -435,11 +431,17 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj); + // Create acceptor. + grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor)); + acceptor->from_server = sp->server; + acceptor->port_index = sp->port_index; + acceptor->fd_index = sp->fd_index; + sp->server->on_accept_cb( exec_ctx, sp->server->on_accept_cb_arg, grpc_tcp_create(fdobj, sp->server->resource_quota, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str), - read_notifier_pollset, &acceptor); + read_notifier_pollset, acceptor); gpr_free(name); gpr_free(addr_str); @@ -721,8 +723,8 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, "clone_port", clone_port(sp, (unsigned)(pollset_count - 1)))); for (i = 0; i < pollset_count; i++) { grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd); - sp->read_closure.cb = on_read; - sp->read_closure.cb_arg = sp; + grpc_closure_init(&sp->read_closure, on_read, sp, + grpc_schedule_on_exec_ctx); grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); s->active_ports++; sp = sp->next; @@ -731,8 +733,8 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, for (i = 0; i < pollset_count; i++) { grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd); } - sp->read_closure.cb = on_read; - sp->read_closure.cb_arg = sp; + grpc_closure_init(&sp->read_closure, on_read, sp, + grpc_schedule_on_exec_ctx); grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); s->active_ports++; sp = sp->next; @@ -758,7 +760,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { if (gpr_unref(&s->refs)) { grpc_tcp_server_shutdown_listeners(exec_ctx, s); gpr_mu_lock(&s->mu); - grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL); + grpc_closure_list_sched(exec_ctx, &s->shutdown_starting); gpr_mu_unlock(&s->mu); tcp_server_destroy(exec_ctx, s); } diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c index b5b9b92a20..8abc60d624 100644 --- a/src/core/lib/iomgr/tcp_server_uv.c +++ b/src/core/lib/iomgr/tcp_server_uv.c @@ -126,7 +126,7 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s, static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { if (s->shutdown_complete != NULL) { - grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE); } while (s->head) { @@ -170,7 +170,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { if (gpr_unref(&s->refs)) { /* Complete shutdown_starting work before destroying. 
*/ grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL); + grpc_closure_list_sched(&local_exec_ctx, &s->shutdown_starting); if (exec_ctx == NULL) { grpc_exec_ctx_flush(&local_exec_ctx); tcp_server_destroy(&local_exec_ctx, s); @@ -188,7 +188,6 @@ static void accepted_connection_close_cb(uv_handle_t *handle) { static void on_connect(uv_stream_t *server, int status) { grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data; - grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index, 0}; uv_tcp_t *client; grpc_endpoint *ep = NULL; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; @@ -201,6 +200,7 @@ static void on_connect(uv_stream_t *server, int status) { uv_strerror(status)); return; } + client = gpr_malloc(sizeof(uv_tcp_t)); uv_tcp_init(uv_default_loop(), client); // UV documentation says this is guaranteed to succeed @@ -220,8 +220,13 @@ static void on_connect(uv_stream_t *server, int status) { gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(status)); } ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string); + // Create acceptor. + grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor)); + acceptor->from_server = sp->server; + acceptor->port_index = sp->port_index; + acceptor->fd_index = 0; sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL, - &acceptor); + acceptor); grpc_exec_ctx_finish(&exec_ctx); } } diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c index b8a391c059..6302bff5d2 100644 --- a/src/core/lib/iomgr/tcp_server_windows.c +++ b/src/core/lib/iomgr/tcp_server_windows.c @@ -162,11 +162,12 @@ static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg, static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { if (s->shutdown_complete != NULL) { - grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE); } - grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(destroy_server, s), - GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, grpc_closure_create(destroy_server, s, + grpc_schedule_on_exec_ctx), + GRPC_ERROR_NONE); } grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) { @@ -204,7 +205,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { if (gpr_unref(&s->refs)) { grpc_tcp_server_shutdown_listeners(exec_ctx, s); gpr_mu_lock(&s->mu); - grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL); + grpc_closure_list_sched(exec_ctx, &s->shutdown_starting); gpr_mu_unlock(&s->mu); tcp_server_destroy(exec_ctx, s); } @@ -323,7 +324,6 @@ failure: /* Event manager callback when reads are ready. */ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_tcp_listener *sp = arg; - grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index, 0}; SOCKET sock = sp->new_socket; grpc_winsocket_callback_info *info = &sp->socket->read_info; grpc_endpoint *ep = NULL; @@ -396,8 +396,13 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { /* The only time we should call our callback, is where we successfully managed to accept a connection, and created an endpoint. */ if (ep) { + // Create acceptor. 
+ grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor)); + acceptor->from_server = sp->server; + acceptor->port_index = sp->port_index; + acceptor->fd_index = 0; sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep, NULL, - &acceptor); + acceptor); } /* As we were notified from the IOCP of one and exactly one accept, the former socked we created has now either been destroy or assigned @@ -461,7 +466,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock, sp->new_socket = INVALID_SOCKET; sp->port = port; sp->port_index = port_index; - grpc_closure_init(&sp->on_accept, on_accept, sp); + grpc_closure_init(&sp->on_accept, on_accept, sp, grpc_schedule_on_exec_ctx); GPR_ASSERT(sp->socket); gpr_mu_unlock(&s->mu); *listener = sp; diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c index 6e2ad1dbe9..4be95457d5 100644 --- a/src/core/lib/iomgr/tcp_uv.c +++ b/src/core/lib/iomgr/tcp_uv.c @@ -169,7 +169,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread, // nread < 0: Error error = GRPC_ERROR_CREATE("TCP Read failed"); } - grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL); + grpc_closure_sched(&exec_ctx, cb, error); grpc_exec_ctx_finish(&exec_ctx); } @@ -190,7 +190,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, error = GRPC_ERROR_CREATE("TCP Read failed at start"); error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status)); - grpc_exec_ctx_sched(exec_ctx, cb, error, NULL); + grpc_closure_sched(exec_ctx, cb, error); } if (grpc_tcp_trace) { const char *str = grpc_error_string(error); @@ -217,7 +217,7 @@ static void write_callback(uv_write_t *req, int status) { gpr_free(tcp->write_buffers); grpc_resource_user_free(&exec_ctx, tcp->resource_user, sizeof(uv_buf_t) * tcp->write_slices->count); - grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL); + grpc_closure_sched(&exec_ctx, cb, error); grpc_exec_ctx_finish(&exec_ctx); } @@ -243,8 +243,8 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, } if (tcp->shutting_down) { - grpc_exec_ctx_sched(exec_ctx, cb, - GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL); + grpc_closure_sched(exec_ctx, cb, + GRPC_ERROR_CREATE("TCP socket is shutting down")); return; } @@ -254,7 +254,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, if (tcp->write_slices->count == 0) { // No slices means we don't have to do anything, // and libuv doesn't like empty writes - grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE); return; } diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c index d4613b674e..fa13ac1d25 100644 --- a/src/core/lib/iomgr/tcp_windows.c +++ b/src/core/lib/iomgr/tcp_windows.c @@ -188,7 +188,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { tcp->read_cb = NULL; TCP_UNREF(exec_ctx, tcp, "read"); - grpc_exec_ctx_sched(exec_ctx, cb, error, NULL); + grpc_closure_sched(exec_ctx, cb, error); } static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, @@ -202,8 +202,8 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, WSABUF buffer; if (tcp->shutting_down) { - grpc_exec_ctx_sched(exec_ctx, cb, - GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL); + grpc_closure_sched(exec_ctx, cb, + GRPC_ERROR_CREATE("TCP socket is shutting down")); return; } @@ -227,7 +227,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, /* Did we get data 
immediately ? Yay. */ if (info->wsa_error != WSAEWOULDBLOCK) { info->bytes_transfered = bytes_read; - grpc_exec_ctx_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE); return; } @@ -240,8 +240,8 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { info->wsa_error = wsa_error; - grpc_exec_ctx_sched(exec_ctx, &tcp->on_read, - GRPC_WSA_ERROR(info->wsa_error, "WSARecv"), NULL); + grpc_closure_sched(exec_ctx, &tcp->on_read, + GRPC_WSA_ERROR(info->wsa_error, "WSARecv")); return; } } @@ -272,7 +272,7 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { } TCP_UNREF(exec_ctx, tcp, "write"); - grpc_exec_ctx_sched(exec_ctx, cb, error, NULL); + grpc_closure_sched(exec_ctx, cb, error); } /* Initiates a write. */ @@ -290,8 +290,8 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, size_t len; if (tcp->shutting_down) { - grpc_exec_ctx_sched(exec_ctx, cb, - GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL); + grpc_closure_sched(exec_ctx, cb, + GRPC_ERROR_CREATE("TCP socket is shutting down")); return; } @@ -322,7 +322,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, grpc_error *error = status == 0 ? GRPC_ERROR_NONE : GRPC_WSA_ERROR(info->wsa_error, "WSASend"); - grpc_exec_ctx_sched(exec_ctx, cb, error, NULL); + grpc_closure_sched(exec_ctx, cb, error); if (allocated) gpr_free(allocated); return; } @@ -340,8 +340,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { TCP_UNREF(exec_ctx, tcp, "write"); - grpc_exec_ctx_sched(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"), - NULL); + grpc_closure_sched(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend")); return; } } @@ -424,8 +423,8 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, tcp->socket = socket; gpr_mu_init(&tcp->mu); gpr_ref_init(&tcp->refcount, 1); - grpc_closure_init(&tcp->on_read, on_read, tcp); - grpc_closure_init(&tcp->on_write, on_write, tcp); + grpc_closure_init(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx); + grpc_closure_init(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx); tcp->peer_string = gpr_strdup(peer_string); tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string); /* Tell network status tracking code about the new endpoint */ diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c index 00058f9d86..ecd3b284dc 100644 --- a/src/core/lib/iomgr/timer_generic.c +++ b/src/core/lib/iomgr/timer_generic.c @@ -184,22 +184,22 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, shard_type *shard = &g_shards[shard_idx(timer)]; GPR_ASSERT(deadline.clock_type == g_clock_type); GPR_ASSERT(now.clock_type == g_clock_type); - grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg); + grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg, + grpc_schedule_on_exec_ctx); timer->deadline = deadline; timer->triggered = 0; if (!g_initialized) { timer->triggered = 1; - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, &timer->closure, - GRPC_ERROR_CREATE("Attempt to create timer before initialization"), - NULL); + GRPC_ERROR_CREATE("Attempt to create timer before initialization")); return; } if (gpr_time_cmp(deadline, now) <= 0) { timer->triggered = 1; - grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL); + 
grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE); return; } @@ -251,7 +251,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { shard_type *shard = &g_shards[shard_idx(timer)]; gpr_mu_lock(&shard->mu); if (!timer->triggered) { - grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED, NULL); + grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED); timer->triggered = 1; if (timer->heap_index == INVALID_HEAP_INDEX) { list_remove(timer); @@ -317,7 +317,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard, grpc_timer *timer; gpr_mu_lock(&shard->mu); while ((timer = pop_one(shard, now))) { - grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_REF(error), NULL); + grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_REF(error)); n++; } *new_min_deadline = compute_min_deadline(shard); diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c index cfcb89268b..00b835ffb8 100644 --- a/src/core/lib/iomgr/timer_uv.c +++ b/src/core/lib/iomgr/timer_uv.c @@ -55,7 +55,7 @@ void run_expired_timer(uv_timer_t *handle) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; GPR_ASSERT(!timer->triggered); timer->triggered = 1; - grpc_exec_ctx_sched(&exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(&exec_ctx, &timer->closure, GRPC_ERROR_NONE); stop_uv_timer(handle); grpc_exec_ctx_finish(&exec_ctx); } @@ -65,10 +65,11 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, void *timer_cb_arg, gpr_timespec now) { uint64_t timeout; uv_timer_t *uv_timer; - grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg); + grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg, + grpc_schedule_on_exec_ctx); if (gpr_time_cmp(deadline, now) <= 0) { timer->triggered = 1; - grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE); return; } timer->triggered = 0; @@ -83,7 +84,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { if (!timer->triggered) { timer->triggered = 1; - grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED, NULL); + grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED); stop_uv_timer((uv_timer_t *)timer->uv_timer); } } diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c index 3c24ea9afa..dfbd295c91 100644 --- a/src/core/lib/iomgr/udp_server.c +++ b/src/core/lib/iomgr/udp_server.c @@ -126,7 +126,7 @@ grpc_udp_server *grpc_udp_server_create(void) { static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { if (s->shutdown_complete != NULL) { - grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE); } gpr_mu_destroy(&s->mu); @@ -170,8 +170,8 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { for (sp = s->head; sp; sp = sp->next) { grpc_unlink_if_unix_domain_socket(&sp->addr); - sp->destroyed_closure.cb = destroyed_port; - sp->destroyed_closure.cb_arg = s; + grpc_closure_init(&sp->destroyed_closure, destroyed_port, s, + grpc_schedule_on_exec_ctx); /* Call the orphan_cb to signal that the FD is about to be closed and * should no longer be used. 
*/ @@ -446,8 +446,8 @@ void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s, for (i = 0; i < pollset_count; i++) { grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd); } - sp->read_closure.cb = on_read; - sp->read_closure.cb_arg = sp; + grpc_closure_init(&sp->read_closure, on_read, sp, + grpc_schedule_on_exec_ctx); grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); s->active_ports++; diff --git a/src/core/lib/iomgr/workqueue.h b/src/core/lib/iomgr/workqueue.h index 73d9849843..371b0f55dc 100644 --- a/src/core/lib/iomgr/workqueue.h +++ b/src/core/lib/iomgr/workqueue.h @@ -72,17 +72,16 @@ grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue); void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue); #endif -/** Add a work item to a workqueue. Items added to a work queue will be started - in approximately the order they were enqueued, on some thread that may or - may not be the current thread. Successive closures enqueued onto a workqueue - MAY be executed concurrently. +/** Fetch the workqueue closure scheduler. Items added to a work queue will be + started in approximately the order they were enqueued, on some thread that + may or may not be the current thread. Successive closures enqueued onto a + workqueue MAY be executed concurrently. It is generally more expensive to add a closure to a workqueue than to the execution context, both in terms of CPU work and in execution latency. Use work queues when it's important that other threads be given a chance to tackle some workload. */ -void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, - grpc_closure *closure, grpc_error *error); +grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue); #endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_H */ diff --git a/src/core/lib/iomgr/workqueue_uv.c b/src/core/lib/iomgr/workqueue_uv.c index e58ca476cc..4d61b40912 100644 --- a/src/core/lib/iomgr/workqueue_uv.c +++ b/src/core/lib/iomgr/workqueue_uv.c @@ -58,9 +58,8 @@ grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) { void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {} #endif -void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, - grpc_closure *closure, grpc_error *error) { - grpc_exec_ctx_sched(exec_ctx, closure, error, NULL); +grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) { + return grpc_schedule_on_exec_ctx; } #endif /* GPR_UV */ diff --git a/src/core/lib/iomgr/workqueue_windows.c b/src/core/lib/iomgr/workqueue_windows.c index 5c93d3c59e..234b47cdf5 100644 --- a/src/core/lib/iomgr/workqueue_windows.c +++ b/src/core/lib/iomgr/workqueue_windows.c @@ -56,9 +56,8 @@ grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) { void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {} #endif -void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, - grpc_closure *closure, grpc_error *error) { - grpc_exec_ctx_sched(exec_ctx, closure, error, NULL); +grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) { + return grpc_schedule_on_exec_ctx; } #endif /* GPR_WINDOWS */ diff --git a/src/core/lib/security/credentials/fake/fake_credentials.c b/src/core/lib/security/credentials/fake/fake_credentials.c index ea4cb76fb9..1cf142fa9a 100644 --- a/src/core/lib/security/credentials/fake/fake_credentials.c +++ b/src/core/lib/security/credentials/fake/fake_credentials.c @@ -113,9 +113,10 @@ static void 
md_only_test_get_request_metadata( if (c->is_async) { grpc_credentials_metadata_request *cb_arg = grpc_credentials_metadata_request_create(creds, cb, user_data); - grpc_executor_push( - grpc_closure_create(on_simulated_token_fetch_done, cb_arg), - GRPC_ERROR_NONE); + grpc_closure_sched(exec_ctx, + grpc_closure_create(on_simulated_token_fetch_done, + cb_arg, grpc_executor_scheduler), + GRPC_ERROR_NONE); } else { cb(exec_ctx, user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK, NULL); } diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c index afe0e3d357..caf57c856b 100644 --- a/src/core/lib/security/credentials/google_default/google_default_credentials.c +++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c @@ -130,7 +130,8 @@ static int is_stack_running_on_compute_engine(void) { grpc_httpcli_get( &exec_ctx, &context, &detector.pollent, resource_quota, &request, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay), - grpc_closure_create(on_compute_engine_detection_http_response, &detector), + grpc_closure_create(on_compute_engine_detection_http_response, &detector, + grpc_schedule_on_exec_ctx), &detector.response); grpc_resource_quota_internal_unref(&exec_ctx, resource_quota); @@ -155,7 +156,8 @@ static int is_stack_running_on_compute_engine(void) { grpc_httpcli_context_destroy(&context); grpc_closure_init(&destroy_closure, destroy_pollset, - grpc_polling_entity_pollset(&detector.pollent)); + grpc_polling_entity_pollset(&detector.pollent), + grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&detector.pollent), &destroy_closure); diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.c index 42bd89dd0a..8c75098612 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.c +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.c @@ -39,6 +39,7 @@ #include "src/core/lib/http/httpcli.h" #include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/security/util/b64.h" +#include "src/core/lib/support/string.h" #include "src/core/lib/tsi/ssl_types.h" #include <grpc/support/alloc.h> @@ -305,6 +306,17 @@ grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims, return GRPC_JWT_VERIFIER_TIME_CONSTRAINT_FAILURE; } + /* This should be probably up to the upper layer to decide but let's harcode + the 99% use case here for email issuers, where the JWT must be self + issued. 
*/ + if (grpc_jwt_issuer_email_domain(claims->iss) != NULL && + claims->sub != NULL && strcmp(claims->iss, claims->sub) != 0) { + gpr_log(GPR_ERROR, + "Email issuer (%s) cannot assert another subject (%s) than itself.", + claims->iss, claims->sub); + return GRPC_JWT_VERIFIER_BAD_SUBJECT; + } + if (audience == NULL) { audience_ok = claims->aud == NULL; } else { @@ -665,7 +677,7 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data, grpc_httpcli_get( exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay), - grpc_closure_create(on_keys_retrieved, ctx), + grpc_closure_create(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx), &ctx->responses[HTTP_RESPONSE_KEYS]); grpc_resource_quota_internal_unref(exec_ctx, resource_quota); grpc_json_destroy(json); @@ -705,10 +717,26 @@ static void verifier_put_mapping(grpc_jwt_verifier *v, const char *email_domain, GPR_ASSERT(v->num_mappings <= v->allocated_mappings); } +/* Very non-sophisticated way to detect an email address. Should be good + enough for now... */ +const char *grpc_jwt_issuer_email_domain(const char *issuer) { + const char *at_sign = strchr(issuer, '@'); + if (at_sign == NULL) return NULL; + const char *email_domain = at_sign + 1; + if (*email_domain == '\0') return NULL; + const char *dot = strrchr(email_domain, '.'); + if (dot == NULL || dot == email_domain) return email_domain; + GPR_ASSERT(dot > email_domain); + /* There may be a subdomain, we just want the domain. */ + dot = gpr_memrchr(email_domain, '.', (size_t)(dot - email_domain)); + if (dot == NULL) return email_domain; + return dot + 1; +} + /* Takes ownership of ctx. */ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx, verifier_cb_ctx *ctx) { - const char *at_sign; + const char *email_domain; grpc_closure *http_cb; char *path_prefix = NULL; const char *iss; @@ -733,13 +761,9 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx, Nobody seems to implement the account/email/webfinger part 2. of the spec so we will rely instead on email/url mappings if we detect such an issuer. Part 4, on the other hand is implemented by both google and salesforce. */ - - /* Very non-sophisticated way to detect an email address. Should be good - enough for now... */ - at_sign = strchr(iss, '@'); - if (at_sign != NULL) { + email_domain = grpc_jwt_issuer_email_domain(iss); + if (email_domain != NULL) { email_key_mapping *mapping; - const char *email_domain = at_sign + 1; GPR_ASSERT(ctx->verifier != NULL); mapping = verifier_get_mapping(ctx->verifier, email_domain); if (mapping == NULL) { @@ -754,7 +778,8 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx, *(path_prefix++) = '\0'; gpr_asprintf(&req.http.path, "/%s/%s", path_prefix, iss); } - http_cb = grpc_closure_create(on_keys_retrieved, ctx); + http_cb = + grpc_closure_create(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx); rsp_idx = HTTP_RESPONSE_KEYS; } else { req.host = gpr_strdup(strstr(iss, "https://") == iss ? 
iss + 8 : iss); @@ -766,7 +791,8 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx, gpr_asprintf(&req.http.path, "/%s%s", path_prefix, GRPC_OPENID_CONFIG_URL_SUFFIX); } - http_cb = grpc_closure_create(on_openid_config_retrieved, ctx); + http_cb = grpc_closure_create(on_openid_config_retrieved, ctx, + grpc_schedule_on_exec_ctx); rsp_idx = HTTP_RESPONSE_OPENID; } diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.h b/src/core/lib/security/credentials/jwt/jwt_verifier.h index f09f9d5d47..54ff9b05e5 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.h +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.h @@ -43,8 +43,7 @@ /* --- Constants. --- */ #define GRPC_OPENID_CONFIG_URL_SUFFIX "/.well-known/openid-configuration" -#define GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN \ - "developer.gserviceaccount.com" +#define GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN "gserviceaccount.com" #define GRPC_GOOGLE_SERVICE_ACCOUNTS_KEY_URL_PREFIX \ "www.googleapis.com/robot/v1/metadata/x509" @@ -57,6 +56,7 @@ typedef enum { GRPC_JWT_VERIFIER_BAD_AUDIENCE, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, GRPC_JWT_VERIFIER_TIME_CONSTRAINT_FAILURE, + GRPC_JWT_VERIFIER_BAD_SUBJECT, GRPC_JWT_VERIFIER_GENERIC_ERROR } grpc_jwt_verifier_status; @@ -132,5 +132,6 @@ void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx, grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, grpc_slice buffer); grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims, const char *audience); +const char *grpc_jwt_issuer_email_domain(const char *issuer); #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_VERIFIER_H */ diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c index b3625b22c0..9aa7863977 100644 --- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c @@ -312,9 +312,10 @@ static void compute_engine_fetch_oauth2( extreme memory pressure. */ grpc_resource_quota *resource_quota = grpc_resource_quota_create("oauth2_credentials"); - grpc_httpcli_get(exec_ctx, httpcli_context, pollent, resource_quota, &request, - deadline, grpc_closure_create(response_cb, metadata_req), - &metadata_req->response); + grpc_httpcli_get( + exec_ctx, httpcli_context, pollent, resource_quota, &request, deadline, + grpc_closure_create(response_cb, metadata_req, grpc_schedule_on_exec_ctx), + &metadata_req->response); grpc_resource_quota_internal_unref(exec_ctx, resource_quota); } @@ -368,10 +369,11 @@ static void refresh_token_fetch_oauth2( extreme memory pressure. 
*/ grpc_resource_quota *resource_quota = grpc_resource_quota_create("oauth2_credentials_refresh"); - grpc_httpcli_post(exec_ctx, httpcli_context, pollent, resource_quota, - &request, body, strlen(body), deadline, - grpc_closure_create(response_cb, metadata_req), - &metadata_req->response); + grpc_httpcli_post( + exec_ctx, httpcli_context, pollent, resource_quota, &request, body, + strlen(body), deadline, + grpc_closure_create(response_cb, metadata_req, grpc_schedule_on_exec_ctx), + &metadata_req->response); grpc_resource_quota_internal_unref(exec_ctx, resource_quota); gpr_free(body); } diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c index 053bf5972c..da897296e4 100644 --- a/src/core/lib/security/transport/client_auth_filter.c +++ b/src/core/lib/security/transport/client_auth_filter.c @@ -303,9 +303,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, } /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { grpc_security_connector *sc = grpc_find_security_connector_in_args(args->channel_args); grpc_auth_context *auth_context = @@ -327,6 +327,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx, sc, "client_auth_filter"); chand->auth_context = GRPC_AUTH_CONTEXT_REF(auth_context, "client_auth_filter"); + return GRPC_ERROR_NONE; } /* Destructor for channel data */ diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c index 1b278410e8..750c3675b1 100644 --- a/src/core/lib/security/transport/secure_endpoint.c +++ b/src/core/lib/security/transport/secure_endpoint.c @@ -146,7 +146,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep, } } ep->read_buffer = NULL; - grpc_exec_ctx_sched(exec_ctx, ep->read_cb, error, NULL); + grpc_closure_sched(exec_ctx, ep->read_cb, error); SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read"); } @@ -329,10 +329,9 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, if (result != TSI_OK) { /* TODO(yangg) do different things according to the error type? 
*/ grpc_slice_buffer_reset_and_unref(&ep->output_buffer); - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, cb, - grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result), - NULL); + grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result)); GPR_TIMER_END("secure_endpoint.endpoint_write", 0); return; } @@ -372,7 +371,10 @@ static char *endpoint_get_peer(grpc_endpoint *secure_ep) { return grpc_endpoint_get_peer(ep->wrapped_ep); } -static int endpoint_get_fd(grpc_endpoint *secure_ep) { return -1; } +static int endpoint_get_fd(grpc_endpoint *secure_ep) { + secure_endpoint *ep = (secure_endpoint *)secure_ep; + return grpc_endpoint_get_fd(ep->wrapped_ep); +} static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) { secure_endpoint *ep = (secure_endpoint *)secure_ep; @@ -414,7 +416,7 @@ grpc_endpoint *grpc_secure_endpoint_create( grpc_slice_buffer_init(&ep->output_buffer); grpc_slice_buffer_init(&ep->source_buffer); ep->read_buffer = NULL; - grpc_closure_init(&ep->on_read, on_read, ep); + grpc_closure_init(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx); gpr_mu_init(&ep->protector_mu); gpr_ref_init(&ep->ref, 1); return &ep->base; diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c index a2e0c7c7c7..17ad681c82 100644 --- a/src/core/lib/security/transport/security_connector.c +++ b/src/core/lib/security/transport/security_connector.c @@ -43,6 +43,7 @@ #include <grpc/support/string_util.h> #include "src/core/ext/transport/chttp2/alpn/alpn.h" +#include "src/core/lib/channel/handshaker.h" #include "src/core/lib/iomgr/load_file.h" #include "src/core/lib/security/context/security_context.h" #include "src/core/lib/security/credentials/credentials.h" @@ -111,19 +112,19 @@ const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer, return NULL; } -void grpc_channel_security_connector_create_handshakers( +void grpc_channel_security_connector_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector, grpc_handshake_manager *handshake_mgr) { if (connector != NULL) { - connector->create_handshakers(exec_ctx, connector, handshake_mgr); + connector->add_handshakers(exec_ctx, connector, handshake_mgr); } } -void grpc_server_security_connector_create_handshakers( +void grpc_server_security_connector_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_server_security_connector *connector, grpc_handshake_manager *handshake_mgr) { if (connector != NULL) { - connector->create_handshakers(exec_ctx, connector, handshake_mgr); + connector->add_handshakers(exec_ctx, connector, handshake_mgr); } } @@ -133,9 +134,9 @@ void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx, grpc_auth_context **auth_context, grpc_closure *on_peer_checked) { if (sc == NULL) { - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, on_peer_checked, - GRPC_ERROR_CREATE("cannot check peer -- no security connector"), NULL); + GRPC_ERROR_CREATE("cannot check peer -- no security connector")); tsi_peer_destruct(&peer); } else { sc->vtable->check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked); @@ -272,7 +273,7 @@ static void fake_check_peer(grpc_exec_ctx *exec_ctx, GRPC_FAKE_TRANSPORT_SECURITY_TYPE); end: - grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL); + grpc_closure_sched(exec_ctx, on_peer_checked, error); tsi_peer_destruct(&peer); } @@ -285,20 +286,24 @@ static void fake_channel_check_call_host(grpc_exec_ctx *exec_ctx, cb(exec_ctx, user_data, GRPC_SECURITY_OK); } 
-static void fake_channel_create_handshakers( +static void fake_channel_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, grpc_handshake_manager *handshake_mgr) { - grpc_security_create_handshakers( - exec_ctx, tsi_create_fake_handshaker(true /* is_client */), &sc->base, - handshake_mgr); + grpc_handshake_manager_add( + handshake_mgr, + grpc_security_handshaker_create( + exec_ctx, tsi_create_fake_handshaker(true /* is_client */), + &sc->base)); } -static void fake_server_create_handshakers( - grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc, - grpc_handshake_manager *handshake_mgr) { - grpc_security_create_handshakers( - exec_ctx, tsi_create_fake_handshaker(false /* is_client */), &sc->base, - handshake_mgr); +static void fake_server_add_handshakers(grpc_exec_ctx *exec_ctx, + grpc_server_security_connector *sc, + grpc_handshake_manager *handshake_mgr) { + grpc_handshake_manager_add( + handshake_mgr, + grpc_security_handshaker_create( + exec_ctx, tsi_create_fake_handshaker(false /* is_client */), + &sc->base)); } static grpc_security_connector_vtable fake_channel_vtable = { @@ -316,7 +321,7 @@ grpc_channel_security_connector *grpc_fake_channel_security_connector_create( c->base.vtable = &fake_channel_vtable; c->request_metadata_creds = grpc_call_credentials_ref(request_metadata_creds); c->check_call_host = fake_channel_check_call_host; - c->create_handshakers = fake_channel_create_handshakers; + c->add_handshakers = fake_channel_add_handshakers; return c; } @@ -328,7 +333,7 @@ grpc_server_security_connector *grpc_fake_server_security_connector_create( gpr_ref_init(&c->base.refcount, 1); c->base.vtable = &fake_server_vtable; c->base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME; - c->create_handshakers = fake_server_create_handshakers; + c->add_handshakers = fake_server_add_handshakers; return c; } @@ -382,9 +387,9 @@ static grpc_security_status ssl_create_handshaker( return GRPC_SECURITY_OK; } -static void ssl_channel_create_handshakers( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - grpc_handshake_manager *handshake_mgr) { +static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx, + grpc_channel_security_connector *sc, + grpc_handshake_manager *handshake_mgr) { grpc_ssl_channel_security_connector *c = (grpc_ssl_channel_security_connector *)sc; // Instantiate TSI handshaker. @@ -395,12 +400,13 @@ static void ssl_channel_create_handshakers( : c->target_name, &tsi_hs); // Create handshakers. - grpc_security_create_handshakers(exec_ctx, tsi_hs, &sc->base, handshake_mgr); + grpc_handshake_manager_add(handshake_mgr, grpc_security_handshaker_create( + exec_ctx, tsi_hs, &sc->base)); } -static void ssl_server_create_handshakers( - grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc, - grpc_handshake_manager *handshake_mgr) { +static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx, + grpc_server_security_connector *sc, + grpc_handshake_manager *handshake_mgr) { grpc_ssl_server_security_connector *c = (grpc_ssl_server_security_connector *)sc; // Instantiate TSI handshaker. @@ -408,7 +414,8 @@ static void ssl_server_create_handshakers( ssl_create_handshaker(c->handshaker_factory, false /* is_client */, NULL /* peer_name */, &tsi_hs); // Create handshakers. 
- grpc_security_create_handshakers(exec_ctx, tsi_hs, &sc->base, handshake_mgr); + grpc_handshake_manager_add(handshake_mgr, grpc_security_handshaker_create( + exec_ctx, tsi_hs, &sc->base)); } static int ssl_host_matches_name(const tsi_peer *peer, const char *peer_name) { @@ -501,7 +508,7 @@ static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx, ? c->overridden_target_name : c->target_name, &peer, auth_context); - grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL); + grpc_closure_sched(exec_ctx, on_peer_checked, error); tsi_peer_destruct(&peer); } @@ -511,7 +518,7 @@ static void ssl_server_check_peer(grpc_exec_ctx *exec_ctx, grpc_closure *on_peer_checked) { grpc_error *error = ssl_check_peer(sc, NULL, &peer, auth_context); tsi_peer_destruct(&peer); - grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL); + grpc_closure_sched(exec_ctx, on_peer_checked, error); } static void add_shallow_auth_property_to_peer(tsi_peer *peer, @@ -708,7 +715,7 @@ grpc_security_status grpc_ssl_channel_security_connector_create( c->base.request_metadata_creds = grpc_call_credentials_ref(request_metadata_creds); c->base.check_call_host = ssl_channel_check_call_host; - c->base.create_handshakers = ssl_channel_create_handshakers; + c->base.add_handshakers = ssl_channel_add_handshakers; gpr_split_host_port(target_name, &c->target_name, &port); gpr_free(port); if (overridden_target_name != NULL) { @@ -783,7 +790,7 @@ grpc_security_status grpc_ssl_server_security_connector_create( *sc = NULL; goto error; } - c->base.create_handshakers = ssl_server_create_handshakers; + c->base.add_handshakers = ssl_server_add_handshakers; *sc = &c->base; gpr_free((void *)alpn_protocol_strings); gpr_free(alpn_protocol_string_lengths); diff --git a/src/core/lib/security/transport/security_connector.h b/src/core/lib/security/transport/security_connector.h index 696db0e02e..a84b359051 100644 --- a/src/core/lib/security/transport/security_connector.h +++ b/src/core/lib/security/transport/security_connector.h @@ -98,7 +98,7 @@ void grpc_security_connector_unref(grpc_security_connector *policy); #endif /* Check the peer. Callee takes ownership of the peer object. - Sets *auth_context and invokes on_peer_checked when done. */ + When done, sets *auth_context and invokes on_peer_checked. */ void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx, grpc_security_connector *sc, tsi_peer peer, @@ -133,9 +133,9 @@ struct grpc_channel_security_connector { grpc_channel_security_connector *sc, const char *host, grpc_auth_context *auth_context, grpc_security_call_host_check_cb cb, void *user_data); - void (*create_handshakers)(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, - grpc_handshake_manager *handshake_mgr); + void (*add_handshakers)(grpc_exec_ctx *exec_ctx, + grpc_channel_security_connector *sc, + grpc_handshake_manager *handshake_mgr); }; /* Checks that the host that will be set for a call is acceptable. */ @@ -145,7 +145,7 @@ void grpc_channel_security_connector_check_call_host( grpc_security_call_host_check_cb cb, void *user_data); /* Registers handshakers with \a handshake_mgr. 
*/ -void grpc_channel_security_connector_create_handshakers( +void grpc_channel_security_connector_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector, grpc_handshake_manager *handshake_mgr); @@ -158,12 +158,12 @@ typedef struct grpc_server_security_connector grpc_server_security_connector; struct grpc_server_security_connector { grpc_security_connector base; - void (*create_handshakers)(grpc_exec_ctx *exec_ctx, - grpc_server_security_connector *sc, - grpc_handshake_manager *handshake_mgr); + void (*add_handshakers)(grpc_exec_ctx *exec_ctx, + grpc_server_security_connector *sc, + grpc_handshake_manager *handshake_mgr); }; -void grpc_server_security_connector_create_handshakers( +void grpc_server_security_connector_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc, grpc_handshake_manager *handshake_mgr); diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c index fc01bec2f2..748bf4a432 100644 --- a/src/core/lib/security/transport/security_handshaker.c +++ b/src/core/lib/security/transport/security_handshaker.c @@ -131,9 +131,12 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx, // Not shutting down, so the write failed. Clean up before // invoking the callback. cleanup_args_for_failure_locked(h); + // Set shutdown to true so that subsequent calls to + // security_handshaker_shutdown() do nothing. + h->shutdown = true; } // Invoke callback. - grpc_exec_ctx_sched(exec_ctx, h->on_handshake_done, error, NULL); + grpc_closure_sched(exec_ctx, h->on_handshake_done, error); } static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg, @@ -170,7 +173,7 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg, grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1); grpc_channel_args_destroy(tmp_args); // Invoke callback. - grpc_exec_ctx_sched(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE); // Set shutdown to true so that subsequent calls to // security_handshaker_shutdown() do nothing. 
h->shutdown = true; @@ -389,10 +392,13 @@ static grpc_handshaker *security_handshaker_create( h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE; h->handshake_buffer = gpr_malloc(h->handshake_buffer_size); grpc_closure_init(&h->on_handshake_data_sent_to_peer, - on_handshake_data_sent_to_peer, h); + on_handshake_data_sent_to_peer, h, + grpc_schedule_on_exec_ctx); grpc_closure_init(&h->on_handshake_data_received_from_peer, - on_handshake_data_received_from_peer, h); - grpc_closure_init(&h->on_peer_checked, on_peer_checked, h); + on_handshake_data_received_from_peer, h, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&h->on_peer_checked, on_peer_checked, h, + grpc_schedule_on_exec_ctx); grpc_slice_buffer_init(&h->left_overs); grpc_slice_buffer_init(&h->outgoing); return &h->base; @@ -415,9 +421,8 @@ static void fail_handshaker_do_handshake(grpc_exec_ctx *exec_ctx, grpc_tcp_server_acceptor *acceptor, grpc_closure *on_handshake_done, grpc_handshaker_args *args) { - grpc_exec_ctx_sched(exec_ctx, on_handshake_done, - GRPC_ERROR_CREATE("Failed to create security handshaker"), - NULL); + grpc_closure_sched(exec_ctx, on_handshake_done, + GRPC_ERROR_CREATE("Failed to create security handshaker")); } static const grpc_handshaker_vtable fail_handshaker_vtable = { @@ -434,17 +439,14 @@ static grpc_handshaker *fail_handshaker_create() { // exported functions // -void grpc_security_create_handshakers(grpc_exec_ctx *exec_ctx, - tsi_handshaker *handshaker, - grpc_security_connector *connector, - grpc_handshake_manager *handshake_mgr) { - // If no TSI handshaker was created, add a handshaker that always fails. - // Otherwise, add a real security handshaker. +grpc_handshaker *grpc_security_handshaker_create( + grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker, + grpc_security_connector *connector) { + // If no TSI handshaker was created, return a handshaker that always fails. + // Otherwise, return a real security handshaker. if (handshaker == NULL) { - grpc_handshake_manager_add(handshake_mgr, fail_handshaker_create()); + return fail_handshaker_create(); } else { - grpc_handshake_manager_add( - handshake_mgr, - security_handshaker_create(exec_ctx, handshaker, connector)); + return security_handshaker_create(exec_ctx, handshaker, connector); } } diff --git a/src/core/lib/security/transport/security_handshaker.h b/src/core/lib/security/transport/security_handshaker.h index f71f43a359..5ddbf4b451 100644 --- a/src/core/lib/security/transport/security_handshaker.h +++ b/src/core/lib/security/transport/security_handshaker.h @@ -34,14 +34,13 @@ #ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_HANDSHAKER_H #define GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_HANDSHAKER_H -#include "src/core/lib/iomgr/endpoint.h" +#include "src/core/lib/channel/handshaker.h" +#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/security/transport/security_connector.h" -/// Creates any necessary security handshakers and adds them to -/// \a handshake_mgr. -void grpc_security_create_handshakers(grpc_exec_ctx *exec_ctx, - tsi_handshaker *handshaker, - grpc_security_connector *connector, - grpc_handshake_manager *handshake_mgr); +/// Creates a security handshaker using \a handshaker. 
+grpc_handshaker *grpc_security_handshaker_create( + grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker, + grpc_security_connector *connector); #endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_HANDSHAKER_H */ diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c index eaa1d0720b..5b4adc4661 100644 --- a/src/core/lib/security/transport/server_auth_filter.c +++ b/src/core/lib/security/transport/server_auth_filter.c @@ -132,7 +132,7 @@ static void on_md_processing_done( grpc_metadata_batch_filter(calld->recv_initial_metadata, remove_consumed_md, elem); grpc_metadata_array_destroy(&calld->md); - grpc_exec_ctx_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE); } else { grpc_slice message; grpc_transport_stream_op *close_op = gpr_malloc(sizeof(*close_op)); @@ -148,13 +148,13 @@ static void on_md_processing_done( calld->transport_op->send_message = NULL; } calld->transport_op->send_trailing_metadata = NULL; - close_op->on_complete = grpc_closure_create(destroy_op, close_op); + close_op->on_complete = + grpc_closure_create(destroy_op, close_op, grpc_schedule_on_exec_ctx); grpc_transport_stream_op_add_close(close_op, status, &message); grpc_call_next_op(&exec_ctx, elem, close_op); - grpc_exec_ctx_sched(&exec_ctx, calld->on_done_recv, - grpc_error_set_int(GRPC_ERROR_CREATE(error_details), - GRPC_ERROR_INT_GRPC_STATUS, status), - NULL); + grpc_closure_sched(&exec_ctx, calld->on_done_recv, + grpc_error_set_int(GRPC_ERROR_CREATE(error_details), + GRPC_ERROR_INT_GRPC_STATUS, status)); } grpc_exec_ctx_finish(&exec_ctx); @@ -174,8 +174,7 @@ static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, return; } } - grpc_exec_ctx_sched(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error), - NULL); + grpc_closure_sched(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error)); } static void set_recv_ops_md_callbacks(grpc_call_element *elem, @@ -214,7 +213,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, /* initialize members */ memset(calld, 0, sizeof(*calld)); - grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem); + grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem, + grpc_schedule_on_exec_ctx); if (args->context[GRPC_CONTEXT_SECURITY].value != NULL) { args->context[GRPC_CONTEXT_SECURITY].destroy( @@ -238,9 +238,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, void *ignored) {} /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { grpc_auth_context *auth_context = grpc_find_auth_context_in_args(args->channel_args); grpc_server_credentials *creds = @@ -256,6 +256,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx, chand->auth_context = GRPC_AUTH_CONTEXT_REF(auth_context, "server_auth_filter"); chand->creds = grpc_server_credentials_ref(creds); + return GRPC_ERROR_NONE; } /* Destructor for channel data */ diff --git a/src/core/lib/support/string.c b/src/core/lib/support/string.c index f10a30f0fd..426fce28f8 100644 --- a/src/core/lib/support/string.c +++ b/src/core/lib/support/string.c @@ -275,3 +275,15 @@ int gpr_stricmp(const char *a, const char *b) { } while (ca == cb && ca && cb); return ca - cb; } + +void *gpr_memrchr(const void *s, int c, size_t n) 
{ + if (s == NULL) return NULL; + char *b = (char *)s; + size_t i; + for (i = 0; i < n; i++) { + if (b[n - i - 1] == c) { + return &b[n - i - 1]; + } + } + return NULL; +} diff --git a/src/core/lib/support/string.h b/src/core/lib/support/string.h index e933e2eb46..6d1f7cc632 100644 --- a/src/core/lib/support/string.h +++ b/src/core/lib/support/string.h @@ -118,6 +118,8 @@ char *gpr_strvec_flatten(gpr_strvec *strs, size_t *total_length); lower(a)==lower(b), >0 if lower(a)>lower(b) */ int gpr_stricmp(const char *a, const char *b); +void *gpr_memrchr(const void *s, int c, size_t n); + #ifdef __cplusplus } #endif diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c index 1e0f3eeca5..b20801005a 100644 --- a/src/core/lib/surface/call.c +++ b/src/core/lib/surface/call.c @@ -794,7 +794,8 @@ static void send_cancel(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) { memset(&tc->op, 0, sizeof(tc->op)); tc->op.cancel_error = tc->error; /* reuse closure to catch completion */ - grpc_closure_init(&tc->closure, done_termination, tc); + grpc_closure_init(&tc->closure, done_termination, tc, + grpc_schedule_on_exec_ctx); tc->op.on_complete = &tc->closure; execute_op(exec_ctx, tc->call, &tc->op); } @@ -804,7 +805,8 @@ static void send_close(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) { memset(&tc->op, 0, sizeof(tc->op)); tc->op.close_error = tc->error; /* reuse closure to catch completion */ - grpc_closure_init(&tc->closure, done_termination, tc); + grpc_closure_init(&tc->closure, done_termination, tc, + grpc_schedule_on_exec_ctx); tc->op.on_complete = &tc->closure; execute_op(exec_ctx, tc->call, &tc->op); } @@ -814,13 +816,13 @@ static grpc_call_error terminate_with_status(grpc_exec_ctx *exec_ctx, set_status_from_error(tc->call, STATUS_FROM_API_OVERRIDE, tc->error); if (tc->type == TC_CANCEL) { - grpc_closure_init(&tc->closure, send_cancel, tc); + grpc_closure_init(&tc->closure, send_cancel, tc, grpc_schedule_on_exec_ctx); GRPC_CALL_INTERNAL_REF(tc->call, "cancel"); } else if (tc->type == TC_CLOSE) { - grpc_closure_init(&tc->closure, send_close, tc); + grpc_closure_init(&tc->closure, send_close, tc, grpc_schedule_on_exec_ctx); GRPC_CALL_INTERNAL_REF(tc->call, "close"); } - grpc_exec_ctx_sched(exec_ctx, &tc->closure, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, &tc->closure, GRPC_ERROR_NONE); return GRPC_CALL_OK; } @@ -1138,8 +1140,8 @@ static void process_data_after_md(grpc_exec_ctx *exec_ctx, } else { *call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0); } - grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready, - bctl); + grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready, bctl, + grpc_schedule_on_exec_ctx); continue_receiving_slices(exec_ctx, bctl); } } @@ -1251,9 +1253,10 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx, call->has_initial_md_been_received = true; if (call->saved_receiving_stream_ready_bctlp != NULL) { grpc_closure *saved_rsr_closure = grpc_closure_create( - receiving_stream_ready, call->saved_receiving_stream_ready_bctlp); + receiving_stream_ready, call->saved_receiving_stream_ready_bctlp, + grpc_schedule_on_exec_ctx); call->saved_receiving_stream_ready_bctlp = NULL; - grpc_exec_ctx_sched(exec_ctx, saved_rsr_closure, error, NULL); + grpc_closure_sched(exec_ctx, saved_rsr_closure, error); } gpr_mu_unlock(&call->mu); @@ -1551,10 +1554,15 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS; goto done_with_error; } + /* 
IF this is a server, then GRPC_OP_RECV_INITIAL_METADATA *must* come + from server.c. In that case, it's coming from accept_stream, and in + that case we're not necessarily covered by a poller. */ + stream_op->covered_by_poller = call->is_client; call->received_initial_metadata = 1; call->buffered_metadata[0] = op->data.recv_initial_metadata; grpc_closure_init(&call->receiving_initial_metadata_ready, - receiving_initial_metadata_ready, bctl); + receiving_initial_metadata_ready, bctl, + grpc_schedule_on_exec_ctx); bctl->recv_initial_metadata = 1; stream_op->recv_initial_metadata = &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */]; @@ -1577,7 +1585,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, call->receiving_buffer = op->data.recv_message; stream_op->recv_message = &call->receiving_stream; grpc_closure_init(&call->receiving_stream_ready, receiving_stream_ready, - bctl); + bctl, grpc_schedule_on_exec_ctx); stream_op->recv_message_ready = &call->receiving_stream_ready; num_completion_callbacks_needed++; break; @@ -1642,7 +1650,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed); stream_op->context = call->context; - grpc_closure_init(&bctl->finish_batch, finish_batch, bctl); + grpc_closure_init(&bctl->finish_batch, finish_batch, bctl, + grpc_schedule_on_exec_ctx); stream_op->on_complete = &bctl->finish_batch; gpr_mu_unlock(&call->mu); diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c index 1389df6886..9405015c50 100644 --- a/src/core/lib/surface/channel.c +++ b/src/core/lib/surface/channel.c @@ -86,87 +86,90 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target, const grpc_channel_args *input_args, grpc_channel_stack_type channel_stack_type, grpc_transport *optional_transport) { - bool is_client = grpc_channel_stack_type_is_client(channel_stack_type); - grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create(); grpc_channel_stack_builder_set_channel_arguments(builder, input_args); grpc_channel_stack_builder_set_target(builder, target); grpc_channel_stack_builder_set_transport(builder, optional_transport); - grpc_channel *channel; - grpc_channel_args *args; if (!grpc_channel_init_create_stack(exec_ctx, builder, channel_stack_type)) { grpc_channel_stack_builder_destroy(builder); return NULL; - } else { - args = grpc_channel_args_copy( - grpc_channel_stack_builder_get_channel_arguments(builder)); - channel = grpc_channel_stack_builder_finish( - exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL); + } + grpc_channel_args *args = grpc_channel_args_copy( + grpc_channel_stack_builder_get_channel_arguments(builder)); + grpc_channel *channel; + grpc_error *error = grpc_channel_stack_builder_finish( + exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL, + (void **)&channel); + if (error != GRPC_ERROR_NONE) { + const char *msg = grpc_error_string(error); + gpr_log(GPR_ERROR, "channel stack builder failed: %s", msg); + grpc_error_free_string(msg); + GRPC_ERROR_UNREF(error); + goto done; } memset(channel, 0, sizeof(*channel)); channel->target = gpr_strdup(target); - channel->is_client = is_client; + channel->is_client = grpc_channel_stack_type_is_client(channel_stack_type); gpr_mu_init(&channel->registered_call_mu); channel->registered_calls = NULL; grpc_compression_options_init(&channel->compression_options); - if (args) { - for (size_t i = 0; i < args->num_args; i++) { - if (0 == 
strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) { - if (args->args[i].type != GRPC_ARG_STRING) { - gpr_log(GPR_ERROR, "%s ignored: it must be a string", - GRPC_ARG_DEFAULT_AUTHORITY); - } else { - if (channel->default_authority) { - /* setting this takes precedence over anything else */ - GRPC_MDELEM_UNREF(channel->default_authority); - } - channel->default_authority = grpc_mdelem_from_strings( - ":authority", args->args[i].value.string); + + for (size_t i = 0; i < args->num_args; i++) { + if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) { + if (args->args[i].type != GRPC_ARG_STRING) { + gpr_log(GPR_ERROR, "%s ignored: it must be a string", + GRPC_ARG_DEFAULT_AUTHORITY); + } else { + if (channel->default_authority) { + /* setting this takes precedence over anything else */ + GRPC_MDELEM_UNREF(channel->default_authority); } - } else if (0 == - strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) { - if (args->args[i].type != GRPC_ARG_STRING) { - gpr_log(GPR_ERROR, "%s ignored: it must be a string", + channel->default_authority = + grpc_mdelem_from_strings(":authority", args->args[i].value.string); + } + } else if (0 == + strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) { + if (args->args[i].type != GRPC_ARG_STRING) { + gpr_log(GPR_ERROR, "%s ignored: it must be a string", + GRPC_SSL_TARGET_NAME_OVERRIDE_ARG); + } else { + if (channel->default_authority) { + /* other ways of setting this (notably ssl) take precedence */ + gpr_log(GPR_ERROR, + "%s ignored: default host already set some other way", GRPC_SSL_TARGET_NAME_OVERRIDE_ARG); } else { - if (channel->default_authority) { - /* other ways of setting this (notably ssl) take precedence */ - gpr_log(GPR_ERROR, - "%s ignored: default host already set some other way", - GRPC_SSL_TARGET_NAME_OVERRIDE_ARG); - } else { - channel->default_authority = grpc_mdelem_from_strings( - ":authority", args->args[i].value.string); - } + channel->default_authority = grpc_mdelem_from_strings( + ":authority", args->args[i].value.string); } - } else if (0 == strcmp(args->args[i].key, - GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) { - channel->compression_options.default_level.is_set = true; - GPR_ASSERT(args->args[i].value.integer >= 0 && - args->args[i].value.integer < GRPC_COMPRESS_LEVEL_COUNT); - channel->compression_options.default_level.level = - (grpc_compression_level)args->args[i].value.integer; - } else if (0 == strcmp(args->args[i].key, - GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) { - channel->compression_options.default_algorithm.is_set = true; - GPR_ASSERT(args->args[i].value.integer >= 0 && - args->args[i].value.integer < - GRPC_COMPRESS_ALGORITHMS_COUNT); - channel->compression_options.default_algorithm.algorithm = - (grpc_compression_algorithm)args->args[i].value.integer; - } else if (0 == - strcmp(args->args[i].key, - GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) { - channel->compression_options.enabled_algorithms_bitset = - (uint32_t)args->args[i].value.integer | - 0x1; /* always support no compression */ } + } else if (0 == strcmp(args->args[i].key, + GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) { + channel->compression_options.default_level.is_set = true; + GPR_ASSERT(args->args[i].value.integer >= 0 && + args->args[i].value.integer < GRPC_COMPRESS_LEVEL_COUNT); + channel->compression_options.default_level.level = + (grpc_compression_level)args->args[i].value.integer; + } else if (0 == strcmp(args->args[i].key, + GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) { + 
channel->compression_options.default_algorithm.is_set = true; + GPR_ASSERT(args->args[i].value.integer >= 0 && + args->args[i].value.integer < GRPC_COMPRESS_ALGORITHMS_COUNT); + channel->compression_options.default_algorithm.algorithm = + (grpc_compression_algorithm)args->args[i].value.integer; + } else if (0 == + strcmp(args->args[i].key, + GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) { + channel->compression_options.enabled_algorithms_bitset = + (uint32_t)args->args[i].value.integer | + 0x1; /* always support no compression */ } - grpc_channel_args_destroy(args); } +done: + grpc_channel_args_destroy(args); return channel; } diff --git a/src/core/lib/surface/channel_ping.c b/src/core/lib/surface/channel_ping.c index 0d2f01a649..e68febdddf 100644 --- a/src/core/lib/surface/channel_ping.c +++ b/src/core/lib/surface/channel_ping.c @@ -71,7 +71,7 @@ void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq, GPR_ASSERT(reserved == NULL); pr->tag = tag; pr->cq = cq; - grpc_closure_init(&pr->closure, ping_done, pr); + grpc_closure_init(&pr->closure, ping_done, pr, grpc_schedule_on_exec_ctx); op->send_ping = &pr->closure; op->bind_pollset = grpc_cq_pollset(cq); grpc_cq_begin_op(cq, tag); diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c index 4e0feb56ac..4613c9021e 100644 --- a/src/core/lib/surface/completion_queue.c +++ b/src/core/lib/surface/completion_queue.c @@ -168,7 +168,8 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) { #ifndef NDEBUG cc->outstanding_tag_count = 0; #endif - grpc_closure_init(&cc->pollset_shutdown_done, on_pollset_shutdown_done, cc); + grpc_closure_init(&cc->pollset_shutdown_done, on_pollset_shutdown_done, cc, + grpc_schedule_on_exec_ctx); GPR_TIMER_END("grpc_completion_queue_create", 0); @@ -354,11 +355,13 @@ static void dump_pending_tags(grpc_completion_queue *cc) { gpr_strvec v; gpr_strvec_init(&v); gpr_strvec_add(&v, gpr_strdup("PENDING TAGS:")); + gpr_mu_lock(cc->mu); for (size_t i = 0; i < cc->outstanding_tag_count; i++) { char *s; gpr_asprintf(&s, " %p", cc->outstanding_tags[i]); gpr_strvec_add(&v, s); } + gpr_mu_unlock(cc->mu); char *out = gpr_strvec_flatten(&v, NULL); gpr_strvec_destroy(&v); gpr_log(GPR_DEBUG, "%s", out); diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c index d0df8e7e17..f1ad13711a 100644 --- a/src/core/lib/surface/lame_client.c +++ b/src/core/lib/surface/lame_client.c @@ -98,16 +98,16 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx, if (op->on_connectivity_state_change) { GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_SHUTDOWN); *op->connectivity_state = GRPC_CHANNEL_SHUTDOWN; - grpc_exec_ctx_sched(exec_ctx, op->on_connectivity_state_change, - GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, op->on_connectivity_state_change, + GRPC_ERROR_NONE); } if (op->send_ping != NULL) { - grpc_exec_ctx_sched(exec_ctx, op->send_ping, - GRPC_ERROR_CREATE("lame client channel"), NULL); + grpc_closure_sched(exec_ctx, op->send_ping, + GRPC_ERROR_CREATE("lame client channel")); } GRPC_ERROR_UNREF(op->disconnect_with_error); if (op->on_consumed != NULL) { - grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); } } @@ -123,11 +123,12 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, gpr_free(and_free_memory); } -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - 
grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { GPR_ASSERT(args->is_first); GPR_ASSERT(args->is_last); + return GRPC_ERROR_NONE; } static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c index fe73aa375c..78699e9e65 100644 --- a/src/core/lib/surface/server.c +++ b/src/core/lib/surface/server.c @@ -278,7 +278,8 @@ static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg, static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel, int send_goaway, grpc_error *send_disconnect) { struct shutdown_cleanup_args *sc = gpr_malloc(sizeof(*sc)); - grpc_closure_init(&sc->closure, shutdown_cleanup, sc); + grpc_closure_init(&sc->closure, shutdown_cleanup, sc, + grpc_schedule_on_exec_ctx); grpc_transport_op *op = grpc_make_transport_op(&sc->closure); grpc_channel_element *elem; @@ -346,9 +347,9 @@ static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx, gpr_mu_unlock(&calld->mu_state); grpc_closure_init( &calld->kill_zombie_closure, kill_zombie, - grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0)); - grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE, - NULL); + grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0), + grpc_schedule_on_exec_ctx); + grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE); } } @@ -440,8 +441,8 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand, orphan_channel(chand); server_ref(chand->server); maybe_finish_shutdown(exec_ctx, chand->server); - chand->finish_destroy_channel_closure.cb = finish_destroy_channel; - chand->finish_destroy_channel_closure.cb_arg = chand; + grpc_closure_init(&chand->finish_destroy_channel_closure, + finish_destroy_channel, chand, grpc_schedule_on_exec_ctx); if (grpc_server_channel_trace && error != GRPC_ERROR_NONE) { const char *msg = grpc_error_string(error); @@ -545,8 +546,9 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg, gpr_mu_unlock(&calld->mu_state); grpc_closure_init( &calld->kill_zombie_closure, kill_zombie, - grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0)); - grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, error, NULL); + grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0), + grpc_schedule_on_exec_ctx); + grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, error); return; } @@ -590,9 +592,9 @@ static void finish_start_new_rpc( gpr_mu_lock(&calld->mu_state); calld->state = ZOMBIED; gpr_mu_unlock(&calld->mu_state); - grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem); - grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE, - NULL); + grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem, + grpc_schedule_on_exec_ctx); + grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE); return; } @@ -607,7 +609,8 @@ static void finish_start_new_rpc( memset(&op, 0, sizeof(op)); op.op = GRPC_OP_RECV_MESSAGE; op.data.recv_message = &calld->payload; - grpc_closure_init(&calld->publish, publish_new_rpc, elem); + grpc_closure_init(&calld->publish, publish_new_rpc, elem, + grpc_schedule_on_exec_ctx); grpc_call_start_batch_and_execute(exec_ctx, calld->call, &op, 1, &calld->publish); break; @@ -813,9 +816,10 @@ static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr, if (calld->state == 
NOT_STARTED) { calld->state = ZOMBIED; gpr_mu_unlock(&calld->mu_state); - grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem); - grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, - GRPC_ERROR_NONE, NULL); + grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem, + grpc_schedule_on_exec_ctx); + grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, + GRPC_ERROR_NONE); } else if (calld->state == PENDING) { calld->state = ZOMBIED; gpr_mu_unlock(&calld->mu_state); @@ -851,7 +855,8 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd, memset(&op, 0, sizeof(op)); op.op = GRPC_OP_RECV_INITIAL_METADATA; op.data.recv_initial_metadata = &calld->initial_metadata; - grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem); + grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem, + grpc_schedule_on_exec_ctx); grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1, &calld->got_initial_metadata); } @@ -887,7 +892,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, gpr_mu_init(&calld->mu_state); grpc_closure_init(&calld->server_on_recv_initial_metadata, - server_on_recv_initial_metadata, elem); + server_on_recv_initial_metadata, elem, + grpc_schedule_on_exec_ctx); server_ref(chand->server); return GRPC_ERROR_NONE; @@ -914,9 +920,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, server_unref(exec_ctx, chand->server); } -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { channel_data *chand = elem->channel_data; GPR_ASSERT(args->is_first); GPR_ASSERT(!args->is_last); @@ -926,7 +932,9 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx, chand->registered_methods = NULL; chand->connectivity_state = GRPC_CHANNEL_IDLE; grpc_closure_init(&chand->channel_connectivity_changed, - channel_connectivity_changed, chand); + channel_connectivity_changed, chand, + grpc_schedule_on_exec_ctx); + return GRPC_ERROR_NONE; } static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, @@ -1277,7 +1285,8 @@ void grpc_server_shutdown_and_notify(grpc_server *server, /* Shutdown listeners */ for (l = server->listeners; l; l = l->next) { - grpc_closure_init(&l->destroy_done, listener_destroy_done, server); + grpc_closure_init(&l->destroy_done, listener_destroy_done, server, + grpc_schedule_on_exec_ctx); l->destroy(&exec_ctx, server, l->arg, &l->destroy_done); } @@ -1383,9 +1392,10 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx, gpr_mu_unlock(&calld->mu_state); grpc_closure_init( &calld->kill_zombie_closure, kill_zombie, - grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0)); - grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, - GRPC_ERROR_NONE, NULL); + grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0), + grpc_schedule_on_exec_ctx); + grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, + GRPC_ERROR_NONE); } else { GPR_ASSERT(calld->state == PENDING); calld->state = ACTIVATED; diff --git a/src/core/lib/surface/validate_metadata.c b/src/core/lib/surface/validate_metadata.c index 84f0a083bc..f49dd8584b 100644 --- a/src/core/lib/surface/validate_metadata.c +++ b/src/core/lib/surface/validate_metadata.c @@ -53,7 +53,7 @@ int grpc_header_key_is_legal(const char *key, size_t length) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xff, 0x03, 0x00, 
0x00, 0x00, 0x80, 0xfe, 0xff, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; - if (length == 0) { + if (length == 0 || key[0] == ':') { return 0; } return conforms_to(key, length, legal_header_bits); diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c index 4f49d7cf7d..c656d93740 100644 --- a/src/core/lib/transport/connectivity_state.c +++ b/src/core/lib/transport/connectivity_state.c @@ -81,7 +81,7 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx, } else { error = GRPC_ERROR_CREATE("Shutdown connectivity owner"); } - grpc_exec_ctx_sched(exec_ctx, w->notify, error, NULL); + grpc_closure_sched(exec_ctx, w->notify, error); gpr_free(w); } GRPC_ERROR_UNREF(tracker->current_error); @@ -121,7 +121,7 @@ bool grpc_connectivity_state_notify_on_state_change( if (current == NULL) { grpc_connectivity_state_watcher *w = tracker->watchers; if (w != NULL && w->notify == notify) { - grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL); + grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED); tracker->watchers = w->next; gpr_free(w); return false; @@ -129,7 +129,7 @@ bool grpc_connectivity_state_notify_on_state_change( while (w != NULL) { grpc_connectivity_state_watcher *rm_candidate = w->next; if (rm_candidate != NULL && rm_candidate->notify == notify) { - grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL); + grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED); w->next = w->next->next; gpr_free(rm_candidate); return false; @@ -140,8 +140,8 @@ bool grpc_connectivity_state_notify_on_state_change( } else { if (tracker->current_state != *current) { *current = tracker->current_state; - grpc_exec_ctx_sched(exec_ctx, notify, - GRPC_ERROR_REF(tracker->current_error), NULL); + grpc_closure_sched(exec_ctx, notify, + GRPC_ERROR_REF(tracker->current_error)); } else { grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w)); w->current = current; @@ -191,8 +191,8 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx, gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name, w->notify); } - grpc_exec_ctx_sched(exec_ctx, w->notify, - GRPC_ERROR_REF(tracker->current_error), NULL); + grpc_closure_sched(exec_ctx, w->notify, + GRPC_ERROR_REF(tracker->current_error)); gpr_free(w); } } diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c index b448126da8..0d24062c1e 100644 --- a/src/core/lib/transport/transport.c +++ b/src/core/lib/transport/transport.c @@ -68,7 +68,7 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount) { #endif if (gpr_unref(&refcount->refs)) { - grpc_exec_ctx_sched(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE); } } @@ -82,7 +82,7 @@ void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs, grpc_iomgr_cb_func cb, void *cb_arg) { #endif gpr_ref_init(&refcount->refs, initial_refs); - grpc_closure_init(&refcount->destroy, cb, cb_arg); + grpc_closure_init(&refcount->destroy, cb, cb_arg, grpc_schedule_on_exec_ctx); } static void move64(uint64_t *from, uint64_t *to) { @@ -168,11 +168,10 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx, void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx, grpc_transport_stream_op *op, grpc_error *error) { - grpc_exec_ctx_sched(exec_ctx, op->recv_message_ready, GRPC_ERROR_REF(error), - NULL); - 
grpc_exec_ctx_sched(exec_ctx, op->recv_initial_metadata_ready, - GRPC_ERROR_REF(error), NULL); - grpc_exec_ctx_sched(exec_ctx, op->on_complete, error, NULL); + grpc_closure_sched(exec_ctx, op->recv_message_ready, GRPC_ERROR_REF(error)); + grpc_closure_sched(exec_ctx, op->recv_initial_metadata_ready, + GRPC_ERROR_REF(error)); + grpc_closure_sched(exec_ctx, op->on_complete, error); } typedef struct { @@ -196,7 +195,8 @@ static void add_error(grpc_transport_stream_op *op, grpc_error **which, cmd = gpr_malloc(sizeof(*cmd)); cmd->error = error; cmd->then_call = op->on_complete; - grpc_closure_init(&cmd->closure, free_message, cmd); + grpc_closure_init(&cmd->closure, free_message, cmd, + grpc_schedule_on_exec_ctx); op->on_complete = &cmd->closure; *which = error; } @@ -269,14 +269,14 @@ typedef struct { static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { made_transport_op *op = arg; - grpc_exec_ctx_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error), - NULL); + grpc_closure_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error)); gpr_free(op); } grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) { made_transport_op *op = gpr_malloc(sizeof(*op)); - grpc_closure_init(&op->outer_on_complete, destroy_made_transport_op, op); + grpc_closure_init(&op->outer_on_complete, destroy_made_transport_op, op, + grpc_schedule_on_exec_ctx); op->inner_on_complete = on_complete; memset(&op->op, 0, sizeof(op->op)); op->op.on_consumed = &op->outer_on_complete; @@ -292,8 +292,7 @@ typedef struct { static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { made_transport_stream_op *op = arg; - grpc_exec_ctx_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error), - NULL); + grpc_closure_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error)); gpr_free(op); } @@ -301,7 +300,7 @@ grpc_transport_stream_op *grpc_make_transport_stream_op( grpc_closure *on_complete) { made_transport_stream_op *op = gpr_malloc(sizeof(*op)); grpc_closure_init(&op->outer_on_complete, destroy_made_transport_stream_op, - op); + op, grpc_schedule_on_exec_ctx); op->inner_on_complete = on_complete; memset(&op->op, 0, sizeof(op->op)); op->op.on_complete = &op->outer_on_complete; diff --git a/src/cpp/common/channel_filter.h b/src/cpp/common/channel_filter.h index e420efc71c..c9f50df732 100644 --- a/src/cpp/common/channel_filter.h +++ b/src/cpp/common/channel_filter.h @@ -216,12 +216,13 @@ class TransportStreamOp { /// Represents channel data. class ChannelData { public: - virtual ~ChannelData() { - if (peer_) gpr_free((void *)peer_); - } + virtual ~ChannelData() {} - /// Caller does NOT take ownership of result. - const char *peer() const { return peer_; } + /// Initializes the call data. + virtual grpc_error *Init(grpc_exec_ctx *exec_ctx, + grpc_channel_element_args *args) { + return GRPC_ERROR_NONE; + } // TODO(roth): Find a way to avoid passing elem into these methods. @@ -232,11 +233,7 @@ class ChannelData { const grpc_channel_info *channel_info); protected: - /// Takes ownership of \a peer. - ChannelData(const grpc_channel_args &args, const char *peer) : peer_(peer) {} - - private: - const char *peer_; + ChannelData() {} }; /// Represents call data. @@ -245,7 +242,10 @@ class CallData { virtual ~CallData() {} /// Initializes the call data. 
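The core-library hunks above (the surface/*.c files, transport/transport.c, and the C++ wrapper in channel_filter.h) follow two API changes that run through this patch: grpc_closure_init and grpc_closure_create now take an explicit scheduler argument (grpc_schedule_on_exec_ctx everywhere in this diff), grpc_exec_ctx_sched(..., NULL) calls become grpc_closure_sched(...), and the filter init_channel_elem callbacks now return a grpc_error * (GRPC_ERROR_NONE on success) instead of void. src/core/lib/support/string.c also gains gpr_memrchr, a reverse memchr over the last n bytes of a buffer. Below is a minimal sketch of the new closure usage, assuming it is compiled inside the gRPC source tree so the internal iomgr headers are available; my_cb, my_state, and schedule_example are hypothetical names, not part of the patch:

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"

typedef struct { int done; } my_state;

/* Closure callbacks keep the (exec_ctx, arg, error) signature. */
static void my_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  my_state *s = (my_state *)arg;
  s->done = (error == GRPC_ERROR_NONE);
}

static void schedule_example(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  my_state s = {0};
  grpc_closure c;
  /* The scheduler is now an explicit argument to grpc_closure_init. */
  grpc_closure_init(&c, my_cb, &s, grpc_schedule_on_exec_ctx);
  /* grpc_closure_sched replaces grpc_exec_ctx_sched; no workqueue argument. */
  grpc_closure_sched(&exec_ctx, &c, GRPC_ERROR_NONE);
  /* Flushing the exec_ctx runs the scheduled closure (my_cb). */
  grpc_exec_ctx_finish(&exec_ctx);
}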
- virtual grpc_error *Init() { return GRPC_ERROR_NONE; } + virtual grpc_error *Init(grpc_exec_ctx *exec_ctx, ChannelData *channel_data, + grpc_call_element_args *args) { + return GRPC_ERROR_NONE; + } // TODO(roth): Find a way to avoid passing elem into these methods. @@ -263,7 +263,7 @@ class CallData { virtual char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem); protected: - explicit CallData(const ChannelData &) {} + CallData() {} }; namespace internal { @@ -276,15 +276,11 @@ class ChannelFilter final { public: static const size_t channel_data_size = sizeof(ChannelDataType); - static void InitChannelElement(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { - const char *peer = - args->optional_transport - ? grpc_transport_get_peer(exec_ctx, args->optional_transport) - : nullptr; - // Construct the object in the already-allocated memory. - new (elem->channel_data) ChannelDataType(*args->channel_args, peer); + static grpc_error *InitChannelElement(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { + ChannelDataType *channel_data = new (elem->channel_data) ChannelDataType(); + return channel_data->Init(exec_ctx, args); } static void DestroyChannelElement(grpc_exec_ctx *exec_ctx, @@ -312,11 +308,10 @@ class ChannelFilter final { static grpc_error *InitCallElement(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_call_element_args *args) { - const ChannelDataType &channel_data = - *(ChannelDataType *)elem->channel_data; + ChannelDataType *channel_data = (ChannelDataType *)elem->channel_data; // Construct the object in the already-allocated memory. - CallDataType *call_data = new (elem->call_data) CallDataType(channel_data); - return call_data->Init(); + CallDataType *call_data = new (elem->call_data) CallDataType(); + return call_data->Init(exec_ctx, channel_data, args); } static void DestroyCallElement(grpc_exec_ctx *exec_ctx, diff --git a/src/cpp/common/completion_queue_cc.cc b/src/cpp/common/completion_queue_cc.cc index 00cc102f92..0408a41085 100644 --- a/src/cpp/common/completion_queue_cc.cc +++ b/src/cpp/common/completion_queue_cc.cc @@ -43,11 +43,21 @@ namespace grpc { static internal::GrpcLibraryInitializer g_gli_initializer; -CompletionQueue::CompletionQueue(grpc_completion_queue* take) : cq_(take) {} +CompletionQueue::CompletionQueue(grpc_completion_queue* take) : cq_(take) { + InitialAvalanching(); +} void CompletionQueue::Shutdown() { g_gli_initializer.summon(); - grpc_completion_queue_shutdown(cq_); + CompleteAvalanching(); +} + +void CompletionQueue::CompleteAvalanching() { + // Check if this was the last avalanching operation + if (gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_, + static_cast<gpr_atm>(-1)) == 1) { + grpc_completion_queue_shutdown(cq_); + } } CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal( diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index b7cfd6dbf1..817d85a81c 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -510,12 +510,6 @@ void Server::ShutdownInternal(gpr_timespec deadline) { ShutdownTag shutdown_tag; // Dummy shutdown tag grpc_server_shutdown_and_notify(server_, shutdown_cq.cq(), &shutdown_tag); - // Shutdown all ThreadManagers. 
This will try to gracefully stop all the - // threads in the ThreadManagers (once they process any inflight requests) - for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { - (*it)->Shutdown(); // ThreadManager's Shutdown() - } - shutdown_cq.Shutdown(); void* tag; @@ -531,6 +525,12 @@ void Server::ShutdownInternal(gpr_timespec deadline) { // Else in case of SHUTDOWN or GOT_EVENT, it means that the server has // successfully shutdown + // Shutdown all ThreadManagers. This will try to gracefully stop all the + // threads in the ThreadManagers (once they process any inflight requests) + for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { + (*it)->Shutdown(); // ThreadManager's Shutdown() + } + // Wait for threads in all ThreadManagers to terminate for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { (*it)->Wait(); @@ -575,9 +575,14 @@ ServerInterface::BaseAsyncRequest::BaseAsyncRequest( tag_(tag), delete_on_finalize_(delete_on_finalize), call_(nullptr) { + call_cq_->RegisterAvalanching(); // This op will trigger more ops memset(&initial_metadata_array_, 0, sizeof(initial_metadata_array_)); } +ServerInterface::BaseAsyncRequest::~BaseAsyncRequest() { + call_cq_->CompleteAvalanching(); +} + bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag, bool* status) { if (*status) { diff --git a/src/csharp/Grpc.Auth/Grpc.Auth.csproj b/src/csharp/Grpc.Auth/Grpc.Auth.csproj index 99e8c1a3da..db55ed5a6c 100644 --- a/src/csharp/Grpc.Auth/Grpc.Auth.csproj +++ b/src/csharp/Grpc.Auth/Grpc.Auth.csproj @@ -29,15 +29,6 @@ <WarningLevel>4</WarningLevel> <ConsolePause>false</ConsolePause> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Net" /> diff --git a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj index 19a68ab9ea..646effe21a 100644 --- a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj +++ b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj @@ -25,15 +25,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="nunit.framework"> diff --git a/src/csharp/Grpc.Core/Grpc.Core.csproj b/src/csharp/Grpc.Core/Grpc.Core.csproj index 5bfb978ca6..23e1ddcf7f 100644 --- a/src/csharp/Grpc.Core/Grpc.Core.csproj +++ b/src/csharp/Grpc.Core/Grpc.Core.csproj @@ -27,16 +27,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <DefineConstants>SIGNED</DefineConstants> - 
<ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Interactive.Async"> diff --git a/src/csharp/Grpc.Core/NativeDeps.targets b/src/csharp/Grpc.Core/NativeDeps.targets index 66c5ec1292..e187f72d26 100644 --- a/src/csharp/Grpc.Core/NativeDeps.targets +++ b/src/csharp/Grpc.Core/NativeDeps.targets @@ -4,13 +4,11 @@ <PropertyGroup Condition=" '$(NativeDependenciesConfiguration)' == '' "> <NativeDependenciesConfiguration Condition=" '$(Configuration)' == 'Debug' ">Debug</NativeDependenciesConfiguration> <NativeDependenciesConfiguration Condition=" '$(Configuration)' == 'Release' ">Release</NativeDependenciesConfiguration> - <NativeDependenciesConfiguration Condition=" '$(Configuration)' == 'ReleaseSigned' ">Release</NativeDependenciesConfiguration> </PropertyGroup> <PropertyGroup Condition=" '$(NativeDependenciesConfigurationUnix)' == '' "> <NativeDependenciesConfigurationUnix Condition=" '$(Configuration)' == 'Debug' ">dbg</NativeDependenciesConfigurationUnix> <NativeDependenciesConfigurationUnix Condition=" '$(Configuration)' == 'Release' ">opt</NativeDependenciesConfigurationUnix> - <NativeDependenciesConfigurationUnix Condition=" '$(Configuration)' == 'ReleaseSigned' ">opt</NativeDependenciesConfigurationUnix> </PropertyGroup> <!-- Autodetect platform --> diff --git a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj index 65bf236def..de4005c2f6 100644 --- a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj +++ b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj @@ -27,15 +27,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> </ItemGroup> diff --git a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj index 26b42b6936..3f38de2b71 100644 --- a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj +++ b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj @@ -27,15 +27,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> </ItemGroup> diff --git a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj index 16d7a44f92..d22fe87825 100644 --- a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj +++ b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj 
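Stepping back to the C++ surface changes earlier in this diff (completion_queue_cc.cc and server_cc.cc): CompletionQueue::Shutdown() now routes through CompleteAvalanching(), and BaseAsyncRequest registers and completes an "avalanching" operation around requests that can themselves trigger further operations, so grpc_completion_queue_shutdown() is only invoked once the last such operation has finished. The constructor's InitialAvalanching() body is not shown in these hunks; judging from the fetch_add(-1) == 1 check, it presumably seeds the counter with one reference held on behalf of Shutdown() itself. A sketch of that counting pattern in C, for illustration only; cq_shutdown_refs, init_avalanching, register_avalanching, complete_avalanching, and do_shutdown are hypothetical stand-ins, not the actual class members:

#include <grpc/support/atm.h>

static gpr_atm cq_shutdown_refs;

static void do_shutdown(void) {
  /* stands in for grpc_completion_queue_shutdown(cq_) */
}

/* Seed the counter with the reference that Shutdown() will drop. */
static void init_avalanching(void) {
  gpr_atm_no_barrier_store(&cq_shutdown_refs, (gpr_atm)1);
}

/* Operations that can spawn further operations take a reference... */
static void register_avalanching(void) {
  gpr_atm_no_barrier_fetch_add(&cq_shutdown_refs, (gpr_atm)1);
}

/* ...and the queue is only shut down when the last reference is dropped. */
static void complete_avalanching(void) {
  if (gpr_atm_no_barrier_fetch_add(&cq_shutdown_refs, (gpr_atm)(-1)) == 1) {
    do_shutdown();
  }
}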
@@ -25,15 +25,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="nunit.framework"> diff --git a/src/csharp/Grpc.Examples/Grpc.Examples.csproj b/src/csharp/Grpc.Examples/Grpc.Examples.csproj index c1ea2e2833..44acb6c2e3 100644 --- a/src/csharp/Grpc.Examples/Grpc.Examples.csproj +++ b/src/csharp/Grpc.Examples/Grpc.Examples.csproj @@ -27,15 +27,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="nunit.framework"> <HintPath>..\packages\NUnit.3.2.0\lib\net45\nunit.framework.dll</HintPath> diff --git a/src/csharp/Grpc.Examples/MathGrpc.cs b/src/csharp/Grpc.Examples/MathGrpc.cs index 8b431c7218..3364b8ce8e 100644 --- a/src/csharp/Grpc.Examples/MathGrpc.cs +++ b/src/csharp/Grpc.Examples/MathGrpc.cs @@ -85,39 +85,53 @@ namespace Math { public abstract partial class MathBase { /// <summary> - /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient - /// and remainder. + /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient + /// and remainder. /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Math.DivReply> Div(global::Math.DivArgs request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// DivMany accepts an arbitrary number of division args from the client stream - /// and sends back the results in the reply stream. The stream continues until - /// the client closes its end; the server does the same after sending all the - /// replies. The stream ends immediately if either end aborts. + /// DivMany accepts an arbitrary number of division args from the client stream + /// and sends back the results in the reply stream. The stream continues until + /// the client closes its end; the server does the same after sending all the + /// replies. The stream ends immediately if either end aborts. 
/// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task DivMany(IAsyncStreamReader<global::Math.DivArgs> requestStream, IServerStreamWriter<global::Math.DivReply> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib - /// generates up to limit numbers; otherwise it continues until the call is - /// canceled. Unlike Fib above, Fib has no final FibReply. + /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib + /// generates up to limit numbers; otherwise it continues until the call is + /// canceled. Unlike Fib above, Fib has no final FibReply. /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task Fib(global::Math.FibArgs request, IServerStreamWriter<global::Math.Num> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// Sum sums a stream of numbers, returning the final result once the stream - /// is closed. + /// Sum sums a stream of numbers, returning the final result once the stream + /// is closed. /// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Math.Num> Sum(IAsyncStreamReader<global::Math.Num> requestStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); @@ -149,87 +163,123 @@ namespace Math { } /// <summary> - /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient - /// and remainder. + /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient + /// and remainder. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Math.DivReply Div(global::Math.DivArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return Div(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient - /// and remainder. 
+ /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient + /// and remainder. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Math.DivReply Div(global::Math.DivArgs request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_Div, null, options, request); } /// <summary> - /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient - /// and remainder. + /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient + /// and remainder. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return DivAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient - /// and remainder. + /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient + /// and remainder. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_Div, null, options, request); } /// <summary> - /// DivMany accepts an arbitrary number of division args from the client stream - /// and sends back the results in the reply stream. The stream continues until - /// the client closes its end; the server does the same after sending all the - /// replies. The stream ends immediately if either end aborts. + /// DivMany accepts an arbitrary number of division args from the client stream + /// and sends back the results in the reply stream. The stream continues until + /// the client closes its end; the server does the same after sending all the + /// replies. The stream ends immediately if either end aborts. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Math.DivArgs, global::Math.DivReply> DivMany(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return DivMany(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// DivMany accepts an arbitrary number of division args from the client stream - /// and sends back the results in the reply stream. 
The stream continues until - /// the client closes its end; the server does the same after sending all the - /// replies. The stream ends immediately if either end aborts. + /// DivMany accepts an arbitrary number of division args from the client stream + /// and sends back the results in the reply stream. The stream continues until + /// the client closes its end; the server does the same after sending all the + /// replies. The stream ends immediately if either end aborts. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Math.DivArgs, global::Math.DivReply> DivMany(CallOptions options) { return CallInvoker.AsyncDuplexStreamingCall(__Method_DivMany, null, options); } /// <summary> - /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib - /// generates up to limit numbers; otherwise it continues until the call is - /// canceled. Unlike Fib above, Fib has no final FibReply. + /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib + /// generates up to limit numbers; otherwise it continues until the call is + /// canceled. Unlike Fib above, Fib has no final FibReply. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncServerStreamingCall<global::Math.Num> Fib(global::Math.FibArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return Fib(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib - /// generates up to limit numbers; otherwise it continues until the call is - /// canceled. Unlike Fib above, Fib has no final FibReply. + /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib + /// generates up to limit numbers; otherwise it continues until the call is + /// canceled. Unlike Fib above, Fib has no final FibReply. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncServerStreamingCall<global::Math.Num> Fib(global::Math.FibArgs request, CallOptions options) { return CallInvoker.AsyncServerStreamingCall(__Method_Fib, null, options, request); } /// <summary> - /// Sum sums a stream of numbers, returning the final result once the stream - /// is closed. + /// Sum sums a stream of numbers, returning the final result once the stream + /// is closed. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncClientStreamingCall<global::Math.Num, global::Math.Num> Sum(Metadata headers = null, DateTime? 
deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return Sum(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Sum sums a stream of numbers, returning the final result once the stream - /// is closed. + /// Sum sums a stream of numbers, returning the final result once the stream + /// is closed. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncClientStreamingCall<global::Math.Num, global::Math.Num> Sum(CallOptions options) { return CallInvoker.AsyncClientStreamingCall(__Method_Sum, null, options); @@ -242,6 +292,7 @@ namespace Math { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(MathBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() diff --git a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj index 93c3b3a55f..b82f976861 100644 --- a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj +++ b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj @@ -27,15 +27,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Core" /> diff --git a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj index 7418768316..63aa18584d 100644 --- a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj +++ b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj @@ -28,15 +28,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Core" /> diff --git a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs index ad5cf11b75..020c2df565 100644 --- a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs +++ b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs @@ -115,6 +115,7 @@ namespace Grpc.Health.V1 { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(HealthBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() diff --git a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj index ae58073b52..6bb5f33966 100644 --- 
a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj +++ b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj @@ -29,15 +29,6 @@ <WarningLevel>4</WarningLevel> <PlatformTarget>AnyCPU</PlatformTarget> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Net" /> diff --git a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj index 593bf0939d..3b9587e315 100644 --- a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj +++ b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj @@ -27,15 +27,6 @@ <WarningLevel>4</WarningLevel> <PlatformTarget>AnyCPU</PlatformTarget> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> </ItemGroup> diff --git a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj index d5c40ba948..081dc24fbf 100644 --- a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj +++ b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj @@ -29,15 +29,6 @@ <WarningLevel>4</WarningLevel> <PlatformTarget>AnyCPU</PlatformTarget> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Net" /> diff --git a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj index 8bd3d78913..0f28340450 100644 --- a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj +++ b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj @@ -27,15 +27,6 @@ <WarningLevel>4</WarningLevel> <PlatformTarget>AnyCPU</PlatformTarget> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - 
<AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> </ItemGroup> diff --git a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj index 161b015300..f7abcf8046 100644 --- a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj +++ b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj @@ -28,15 +28,6 @@ <WarningLevel>4</WarningLevel> <PlatformTarget>AnyCPU</PlatformTarget> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Net" /> diff --git a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs index d0bf0afc1d..8b58622d53 100644 --- a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs +++ b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs @@ -76,17 +76,24 @@ namespace Grpc.Testing { public abstract partial class MetricsServiceBase { /// <summary> - /// Returns the values of all the gauges that are currently being maintained by - /// the service + /// Returns the values of all the gauges that are currently being maintained by + /// the service /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task GetAllGauges(global::Grpc.Testing.EmptyMessage request, IServerStreamWriter<global::Grpc.Testing.GaugeResponse> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// Returns the value of one gauge + /// Returns the value of one gauge /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.GaugeResponse> GetGauge(global::Grpc.Testing.GaugeRequest request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); @@ -118,45 +125,69 @@ namespace Grpc.Testing { } /// <summary> - /// Returns the values of all the gauges that are currently being maintained by - /// the service + /// Returns the values of all the gauges that are currently being maintained by + /// the service /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. 
The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncServerStreamingCall<global::Grpc.Testing.GaugeResponse> GetAllGauges(global::Grpc.Testing.EmptyMessage request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return GetAllGauges(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Returns the values of all the gauges that are currently being maintained by - /// the service + /// Returns the values of all the gauges that are currently being maintained by + /// the service /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncServerStreamingCall<global::Grpc.Testing.GaugeResponse> GetAllGauges(global::Grpc.Testing.EmptyMessage request, CallOptions options) { return CallInvoker.AsyncServerStreamingCall(__Method_GetAllGauges, null, options, request); } /// <summary> - /// Returns the value of one gauge + /// Returns the value of one gauge /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.GaugeResponse GetGauge(global::Grpc.Testing.GaugeRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return GetGauge(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Returns the value of one gauge + /// Returns the value of one gauge /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.GaugeResponse GetGauge(global::Grpc.Testing.GaugeRequest request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_GetGauge, null, options, request); } /// <summary> - /// Returns the value of one gauge + /// Returns the value of one gauge /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.GaugeResponse> GetGaugeAsync(global::Grpc.Testing.GaugeRequest request, Metadata headers = null, DateTime? 
deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return GetGaugeAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Returns the value of one gauge + /// Returns the value of one gauge /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.GaugeResponse> GetGaugeAsync(global::Grpc.Testing.GaugeRequest request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_GetGauge, null, options, request); @@ -169,6 +200,7 @@ namespace Grpc.Testing { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(MetricsServiceBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() diff --git a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs index 3cc4ed9f3c..5135d9ab66 100644 --- a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs +++ b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs @@ -71,18 +71,25 @@ namespace Grpc.Testing { public abstract partial class BenchmarkServiceBase { /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> UnaryCall(global::Grpc.Testing.SimpleRequest request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task StreamingCall(IAsyncStreamReader<global::Grpc.Testing.SimpleRequest> requestStream, IServerStreamWriter<global::Grpc.Testing.SimpleResponse> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); @@ -114,49 +121,71 @@ namespace Grpc.Testing { } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. 
The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnaryCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_UnaryCall, null, options, request); } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnaryCallAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_UnaryCall, null, options, request); } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingCall(Metadata headers = null, DateTime? 
deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return StreamingCall(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingCall(CallOptions options) { return CallInvoker.AsyncDuplexStreamingCall(__Method_StreamingCall, null, options); @@ -169,6 +198,7 @@ namespace Grpc.Testing { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(BenchmarkServiceBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() @@ -227,42 +257,56 @@ namespace Grpc.Testing { public abstract partial class WorkerServiceBase { /// <summary> - /// Start server with specified workload. - /// First request sent specifies the ServerConfig followed by ServerStatus - /// response. After that, a "Mark" can be sent anytime to request the latest - /// stats. Closing the stream will initiate shutdown of the test server - /// and once the shutdown has finished, the OK status is sent to terminate - /// this RPC. + /// Start server with specified workload. + /// First request sent specifies the ServerConfig followed by ServerStatus + /// response. After that, a "Mark" can be sent anytime to request the latest + /// stats. Closing the stream will initiate shutdown of the test server + /// and once the shutdown has finished, the OK status is sent to terminate + /// this RPC. /// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task RunServer(IAsyncStreamReader<global::Grpc.Testing.ServerArgs> requestStream, IServerStreamWriter<global::Grpc.Testing.ServerStatus> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// Start client with specified workload. - /// First request sent specifies the ClientConfig followed by ClientStatus - /// response. After that, a "Mark" can be sent anytime to request the latest - /// stats. Closing the stream will initiate shutdown of the test client - /// and once the shutdown has finished, the OK status is sent to terminate - /// this RPC. + /// Start client with specified workload. + /// First request sent specifies the ClientConfig followed by ClientStatus + /// response. After that, a "Mark" can be sent anytime to request the latest + /// stats. Closing the stream will initiate shutdown of the test client + /// and once the shutdown has finished, the OK status is sent to terminate + /// this RPC. 
/// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task RunClient(IAsyncStreamReader<global::Grpc.Testing.ClientArgs> requestStream, IServerStreamWriter<global::Grpc.Testing.ClientStatus> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// Just return the core count - unary call + /// Just return the core count - unary call /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.CoreResponse> CoreCount(global::Grpc.Testing.CoreRequest request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// Quit this worker + /// Quit this worker /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Void> QuitWorker(global::Grpc.Testing.Void request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); @@ -294,105 +338,149 @@ namespace Grpc.Testing { } /// <summary> - /// Start server with specified workload. - /// First request sent specifies the ServerConfig followed by ServerStatus - /// response. After that, a "Mark" can be sent anytime to request the latest - /// stats. Closing the stream will initiate shutdown of the test server - /// and once the shutdown has finished, the OK status is sent to terminate - /// this RPC. + /// Start server with specified workload. + /// First request sent specifies the ServerConfig followed by ServerStatus + /// response. After that, a "Mark" can be sent anytime to request the latest + /// stats. Closing the stream will initiate shutdown of the test server + /// and once the shutdown has finished, the OK status is sent to terminate + /// this RPC. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> RunServer(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return RunServer(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Start server with specified workload. - /// First request sent specifies the ServerConfig followed by ServerStatus - /// response. After that, a "Mark" can be sent anytime to request the latest - /// stats. 
Closing the stream will initiate shutdown of the test server - /// and once the shutdown has finished, the OK status is sent to terminate - /// this RPC. + /// Start server with specified workload. + /// First request sent specifies the ServerConfig followed by ServerStatus + /// response. After that, a "Mark" can be sent anytime to request the latest + /// stats. Closing the stream will initiate shutdown of the test server + /// and once the shutdown has finished, the OK status is sent to terminate + /// this RPC. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> RunServer(CallOptions options) { return CallInvoker.AsyncDuplexStreamingCall(__Method_RunServer, null, options); } /// <summary> - /// Start client with specified workload. - /// First request sent specifies the ClientConfig followed by ClientStatus - /// response. After that, a "Mark" can be sent anytime to request the latest - /// stats. Closing the stream will initiate shutdown of the test client - /// and once the shutdown has finished, the OK status is sent to terminate - /// this RPC. + /// Start client with specified workload. + /// First request sent specifies the ClientConfig followed by ClientStatus + /// response. After that, a "Mark" can be sent anytime to request the latest + /// stats. Closing the stream will initiate shutdown of the test client + /// and once the shutdown has finished, the OK status is sent to terminate + /// this RPC. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> RunClient(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return RunClient(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Start client with specified workload. - /// First request sent specifies the ClientConfig followed by ClientStatus - /// response. After that, a "Mark" can be sent anytime to request the latest - /// stats. Closing the stream will initiate shutdown of the test client - /// and once the shutdown has finished, the OK status is sent to terminate - /// this RPC. + /// Start client with specified workload. + /// First request sent specifies the ClientConfig followed by ClientStatus + /// response. After that, a "Mark" can be sent anytime to request the latest + /// stats. Closing the stream will initiate shutdown of the test client + /// and once the shutdown has finished, the OK status is sent to terminate + /// this RPC. 
/// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> RunClient(CallOptions options) { return CallInvoker.AsyncDuplexStreamingCall(__Method_RunClient, null, options); } /// <summary> - /// Just return the core count - unary call + /// Just return the core count - unary call /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.CoreResponse CoreCount(global::Grpc.Testing.CoreRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return CoreCount(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Just return the core count - unary call + /// Just return the core count - unary call /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.CoreResponse CoreCount(global::Grpc.Testing.CoreRequest request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_CoreCount, null, options, request); } /// <summary> - /// Just return the core count - unary call + /// Just return the core count - unary call /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.CoreResponse> CoreCountAsync(global::Grpc.Testing.CoreRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return CoreCountAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Just return the core count - unary call + /// Just return the core count - unary call /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.CoreResponse> CoreCountAsync(global::Grpc.Testing.CoreRequest request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_CoreCount, null, options, request); } /// <summary> - /// Quit this worker + /// Quit this worker /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. 
The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Void QuitWorker(global::Grpc.Testing.Void request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return QuitWorker(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Quit this worker + /// Quit this worker /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Void QuitWorker(global::Grpc.Testing.Void request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_QuitWorker, null, options, request); } /// <summary> - /// Quit this worker + /// Quit this worker /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Void> QuitWorkerAsync(global::Grpc.Testing.Void request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return QuitWorkerAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Quit this worker + /// Quit this worker /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Void> QuitWorkerAsync(global::Grpc.Testing.Void request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_QuitWorker, null, options, request); @@ -405,6 +493,7 @@ namespace Grpc.Testing { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(WorkerServiceBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() diff --git a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs index 43dbc2865f..0265f8e821 100644 --- a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs +++ b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs @@ -42,8 +42,8 @@ using Grpc.Core; namespace Grpc.Testing { /// <summary> - /// A simple service to test the various types of RPCs and experiment with - /// performance with various types of payload. + /// A simple service to test the various types of RPCs and experiment with + /// performance with various types of payload. /// </summary> public static partial class TestService { @@ -123,74 +123,101 @@ namespace Grpc.Testing { public abstract partial class TestServiceBase { /// <summary> - /// One empty request followed by one empty response. + /// One empty request followed by one empty response. 
/// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> EmptyCall(global::Grpc.Testing.Empty request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// One request followed by one response. + /// One request followed by one response. /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> UnaryCall(global::Grpc.Testing.SimpleRequest request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// One request followed by one response. Response has cache control - /// headers set such that a caching HTTP proxy (such as GFE) can - /// satisfy subsequent requests. + /// One request followed by one response. Response has cache control + /// headers set such that a caching HTTP proxy (such as GFE) can + /// satisfy subsequent requests. /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// One request followed by a sequence of responses (streamed download). - /// The server returns the payload with client desired type and sizes. + /// One request followed by a sequence of responses (streamed download). + /// The server returns the payload with client desired type and sizes. /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// A sequence of requests followed by one response (streamed upload). - /// The server returns the aggregated size of client payload as the result. + /// A sequence of requests followed by one response (streamed upload). + /// The server returns the aggregated size of client payload as the result. 
/// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(IAsyncStreamReader<global::Grpc.Testing.StreamingInputCallRequest> requestStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// A sequence of requests with each request served by the server immediately. - /// As one request could lead to multiple responses, this interface - /// demonstrates the idea of full duplexing. + /// A sequence of requests with each request served by the server immediately. + /// As one request could lead to multiple responses, this interface + /// demonstrates the idea of full duplexing. /// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task FullDuplexCall(IAsyncStreamReader<global::Grpc.Testing.StreamingOutputCallRequest> requestStream, IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// A sequence of requests followed by a sequence of responses. - /// The server buffers all the client requests and then serves them in order. A - /// stream of responses are returned to the client when the server starts with - /// first request. + /// A sequence of requests followed by a sequence of responses. + /// The server buffers all the client requests and then serves them in order. A + /// stream of responses are returned to the client when the server starts with + /// first request. /// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task HalfDuplexCall(IAsyncStreamReader<global::Grpc.Testing.StreamingOutputCallRequest> requestStream, IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// The test server will not implement this method. It will be used - /// to test the behavior when clients call unimplemented methods. + /// The test server will not implement this method. It will be used + /// to test the behavior when clients call unimplemented methods. 
/// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> UnimplementedCall(global::Grpc.Testing.Empty request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); @@ -222,195 +249,285 @@ namespace Grpc.Testing { } /// <summary> - /// One empty request followed by one empty response. + /// One empty request followed by one empty response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Empty EmptyCall(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return EmptyCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One empty request followed by one empty response. + /// One empty request followed by one empty response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Empty EmptyCall(global::Grpc.Testing.Empty request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_EmptyCall, null, options, request); } /// <summary> - /// One empty request followed by one empty response. + /// One empty request followed by one empty response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> EmptyCallAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return EmptyCallAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One empty request followed by one empty response. + /// One empty request followed by one empty response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> EmptyCallAsync(global::Grpc.Testing.Empty request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_EmptyCall, null, options, request); } /// <summary> - /// One request followed by one response. + /// One request followed by one response. 
/// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnaryCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. + /// One request followed by one response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_UnaryCall, null, options, request); } /// <summary> - /// One request followed by one response. + /// One request followed by one response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnaryCallAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. + /// One request followed by one response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_UnaryCall, null, options, request); } /// <summary> - /// One request followed by one response. Response has cache control - /// headers set such that a caching HTTP proxy (such as GFE) can - /// satisfy subsequent requests. + /// One request followed by one response. Response has cache control + /// headers set such that a caching HTTP proxy (such as GFE) can + /// satisfy subsequent requests. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. 
The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.SimpleResponse CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return CacheableUnaryCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. Response has cache control - /// headers set such that a caching HTTP proxy (such as GFE) can - /// satisfy subsequent requests. + /// One request followed by one response. Response has cache control + /// headers set such that a caching HTTP proxy (such as GFE) can + /// satisfy subsequent requests. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.SimpleResponse CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_CacheableUnaryCall, null, options, request); } /// <summary> - /// One request followed by one response. Response has cache control - /// headers set such that a caching HTTP proxy (such as GFE) can - /// satisfy subsequent requests. + /// One request followed by one response. Response has cache control + /// headers set such that a caching HTTP proxy (such as GFE) can + /// satisfy subsequent requests. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> CacheableUnaryCallAsync(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return CacheableUnaryCallAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. Response has cache control - /// headers set such that a caching HTTP proxy (such as GFE) can - /// satisfy subsequent requests. + /// One request followed by one response. Response has cache control + /// headers set such that a caching HTTP proxy (such as GFE) can + /// satisfy subsequent requests. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> CacheableUnaryCallAsync(global::Grpc.Testing.SimpleRequest request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_CacheableUnaryCall, null, options, request); } /// <summary> - /// One request followed by a sequence of responses (streamed download). - /// The server returns the payload with client desired type and sizes. + /// One request followed by a sequence of responses (streamed download). 
+ /// The server returns the payload with client desired type and sizes. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncServerStreamingCall<global::Grpc.Testing.StreamingOutputCallResponse> StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return StreamingOutputCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by a sequence of responses (streamed download). - /// The server returns the payload with client desired type and sizes. + /// One request followed by a sequence of responses (streamed download). + /// The server returns the payload with client desired type and sizes. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncServerStreamingCall<global::Grpc.Testing.StreamingOutputCallResponse> StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, CallOptions options) { return CallInvoker.AsyncServerStreamingCall(__Method_StreamingOutputCall, null, options, request); } /// <summary> - /// A sequence of requests followed by one response (streamed upload). - /// The server returns the aggregated size of client payload as the result. + /// A sequence of requests followed by one response (streamed upload). + /// The server returns the aggregated size of client payload as the result. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncClientStreamingCall<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return StreamingInputCall(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// A sequence of requests followed by one response (streamed upload). - /// The server returns the aggregated size of client payload as the result. + /// A sequence of requests followed by one response (streamed upload). + /// The server returns the aggregated size of client payload as the result. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncClientStreamingCall<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(CallOptions options) { return CallInvoker.AsyncClientStreamingCall(__Method_StreamingInputCall, null, options); } /// <summary> - /// A sequence of requests with each request served by the server immediately. 
- /// As one request could lead to multiple responses, this interface - /// demonstrates the idea of full duplexing. + /// A sequence of requests with each request served by the server immediately. + /// As one request could lead to multiple responses, this interface + /// demonstrates the idea of full duplexing. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> FullDuplexCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return FullDuplexCall(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// A sequence of requests with each request served by the server immediately. - /// As one request could lead to multiple responses, this interface - /// demonstrates the idea of full duplexing. + /// A sequence of requests with each request served by the server immediately. + /// As one request could lead to multiple responses, this interface + /// demonstrates the idea of full duplexing. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> FullDuplexCall(CallOptions options) { return CallInvoker.AsyncDuplexStreamingCall(__Method_FullDuplexCall, null, options); } /// <summary> - /// A sequence of requests followed by a sequence of responses. - /// The server buffers all the client requests and then serves them in order. A - /// stream of responses are returned to the client when the server starts with - /// first request. + /// A sequence of requests followed by a sequence of responses. + /// The server buffers all the client requests and then serves them in order. A + /// stream of responses are returned to the client when the server starts with + /// first request. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> HalfDuplexCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return HalfDuplexCall(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// A sequence of requests followed by a sequence of responses. - /// The server buffers all the client requests and then serves them in order. A - /// stream of responses are returned to the client when the server starts with - /// first request. + /// A sequence of requests followed by a sequence of responses. + /// The server buffers all the client requests and then serves them in order. 
A + /// stream of responses are returned to the client when the server starts with + /// first request. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> HalfDuplexCall(CallOptions options) { return CallInvoker.AsyncDuplexStreamingCall(__Method_HalfDuplexCall, null, options); } /// <summary> - /// The test server will not implement this method. It will be used - /// to test the behavior when clients call unimplemented methods. + /// The test server will not implement this method. It will be used + /// to test the behavior when clients call unimplemented methods. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnimplementedCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// The test server will not implement this method. It will be used - /// to test the behavior when clients call unimplemented methods. + /// The test server will not implement this method. It will be used + /// to test the behavior when clients call unimplemented methods. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_UnimplementedCall, null, options, request); } /// <summary> - /// The test server will not implement this method. It will be used - /// to test the behavior when clients call unimplemented methods. + /// The test server will not implement this method. It will be used + /// to test the behavior when clients call unimplemented methods. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnimplementedCallAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// The test server will not implement this method. It will be used - /// to test the behavior when clients call unimplemented methods. + /// The test server will not implement this method. 
It will be used + /// to test the behavior when clients call unimplemented methods. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_UnimplementedCall, null, options, request); @@ -423,6 +540,7 @@ namespace Grpc.Testing { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(TestServiceBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() @@ -438,8 +556,8 @@ namespace Grpc.Testing { } /// <summary> - /// A simple service NOT implemented at servers so clients can test for - /// that case. + /// A simple service NOT implemented at servers so clients can test for + /// that case. /// </summary> public static partial class UnimplementedService { @@ -464,8 +582,11 @@ namespace Grpc.Testing { public abstract partial class UnimplementedServiceBase { /// <summary> - /// A call that no server should implement + /// A call that no server should implement /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> UnimplementedCall(global::Grpc.Testing.Empty request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); @@ -497,29 +618,45 @@ namespace Grpc.Testing { } /// <summary> - /// A call that no server should implement + /// A call that no server should implement /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? 
deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnimplementedCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// A call that no server should implement + /// A call that no server should implement /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_UnimplementedCall, null, options, request); } /// <summary> - /// A call that no server should implement + /// A call that no server should implement /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnimplementedCallAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// A call that no server should implement + /// A call that no server should implement /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_UnimplementedCall, null, options, request); @@ -532,6 +669,7 @@ namespace Grpc.Testing { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(UnimplementedServiceBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() @@ -540,7 +678,7 @@ namespace Grpc.Testing { } /// <summary> - /// A service used to control reconnect server. + /// A service used to control reconnect server. 
/// </summary> public static partial class ReconnectService { @@ -648,6 +786,7 @@ namespace Grpc.Testing { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(ReconnectServiceBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() diff --git a/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj b/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj index cebcf59ce8..c5918b194e 100644 --- a/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj +++ b/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj @@ -27,15 +27,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Core" /> diff --git a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj index ea65998ce3..4e254a0b53 100644 --- a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj +++ b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj @@ -28,15 +28,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Core" /> diff --git a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs index 1b6f96ce7c..5bd7558be5 100644 --- a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs +++ b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs @@ -64,9 +64,13 @@ namespace Grpc.Reflection.V1Alpha { public abstract partial class ServerReflectionBase { /// <summary> - /// The reflection service is structured as a bidirectional stream, ensuring - /// all related requests go to a single server. + /// The reflection service is structured as a bidirectional stream, ensuring + /// all related requests go to a single server. 
/// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task ServerReflectionInfo(IAsyncStreamReader<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest> requestStream, IServerStreamWriter<global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); @@ -98,17 +102,23 @@ namespace Grpc.Reflection.V1Alpha { } /// <summary> - /// The reflection service is structured as a bidirectional stream, ensuring - /// all related requests go to a single server. + /// The reflection service is structured as a bidirectional stream, ensuring + /// all related requests go to a single server. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> ServerReflectionInfo(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return ServerReflectionInfo(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// The reflection service is structured as a bidirectional stream, ensuring - /// all related requests go to a single server. + /// The reflection service is structured as a bidirectional stream, ensuring + /// all related requests go to a single server. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> ServerReflectionInfo(CallOptions options) { return CallInvoker.AsyncDuplexStreamingCall(__Method_ServerReflectionInfo, null, options); @@ -121,6 +131,7 @@ namespace Grpc.Reflection.V1Alpha { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(ServerReflectionBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() diff --git a/src/csharp/Grpc.sln b/src/csharp/Grpc.sln index 2e6a8fd435..179e731380 100644 --- a/src/csharp/Grpc.sln +++ b/src/csharp/Grpc.sln @@ -44,105 +44,72 @@ Global GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Release|Any CPU = Release|Any CPU
- ReleaseSigned|Any CPU = ReleaseSigned|Any CPU
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{143B1C29-C442-4BE0-BF3F-A8F92288AC9F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{143B1C29-C442-4BE0-BF3F-A8F92288AC9F}.Debug|Any CPU.Build.0 = Debug|Any CPU
{143B1C29-C442-4BE0-BF3F-A8F92288AC9F}.Release|Any CPU.ActiveCfg = Release|Any CPU
{143B1C29-C442-4BE0-BF3F-A8F92288AC9F}.Release|Any CPU.Build.0 = Release|Any CPU
- {143B1C29-C442-4BE0-BF3F-A8F92288AC9F}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {143B1C29-C442-4BE0-BF3F-A8F92288AC9F}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{3D166931-BA2D-416E-95A3-D36E8F6E90B9}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{3D166931-BA2D-416E-95A3-D36E8F6E90B9}.Debug|Any CPU.Build.0 = Debug|Any CPU
{3D166931-BA2D-416E-95A3-D36E8F6E90B9}.Release|Any CPU.ActiveCfg = Release|Any CPU
{3D166931-BA2D-416E-95A3-D36E8F6E90B9}.Release|Any CPU.Build.0 = Release|Any CPU
- {3D166931-BA2D-416E-95A3-D36E8F6E90B9}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {3D166931-BA2D-416E-95A3-D36E8F6E90B9}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{4F18CF52-B3DB-4A77-97C5-7F7F4B6C1715}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{4F18CF52-B3DB-4A77-97C5-7F7F4B6C1715}.Debug|Any CPU.Build.0 = Debug|Any CPU
{4F18CF52-B3DB-4A77-97C5-7F7F4B6C1715}.Release|Any CPU.ActiveCfg = Release|Any CPU
{4F18CF52-B3DB-4A77-97C5-7F7F4B6C1715}.Release|Any CPU.Build.0 = Release|Any CPU
- {4F18CF52-B3DB-4A77-97C5-7F7F4B6C1715}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {4F18CF52-B3DB-4A77-97C5-7F7F4B6C1715}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{61ECB8EE-0C96-4F8E-B187-8E4D227417C0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{61ECB8EE-0C96-4F8E-B187-8E4D227417C0}.Debug|Any CPU.Build.0 = Debug|Any CPU
{61ECB8EE-0C96-4F8E-B187-8E4D227417C0}.Release|Any CPU.ActiveCfg = Release|Any CPU
{61ECB8EE-0C96-4F8E-B187-8E4D227417C0}.Release|Any CPU.Build.0 = Release|Any CPU
- {61ECB8EE-0C96-4F8E-B187-8E4D227417C0}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {61ECB8EE-0C96-4F8E-B187-8E4D227417C0}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{7DC1433E-3225-42C7-B7EA-546D56E27A4B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{7DC1433E-3225-42C7-B7EA-546D56E27A4B}.Debug|Any CPU.Build.0 = Debug|Any CPU
{7DC1433E-3225-42C7-B7EA-546D56E27A4B}.Release|Any CPU.ActiveCfg = Release|Any CPU
{7DC1433E-3225-42C7-B7EA-546D56E27A4B}.Release|Any CPU.Build.0 = Release|Any CPU
- {7DC1433E-3225-42C7-B7EA-546D56E27A4B}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {7DC1433E-3225-42C7-B7EA-546D56E27A4B}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{86EC5CB4-4EA2-40A2-8057-86542A0353BB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{86EC5CB4-4EA2-40A2-8057-86542A0353BB}.Debug|Any CPU.Build.0 = Debug|Any CPU
{86EC5CB4-4EA2-40A2-8057-86542A0353BB}.Release|Any CPU.ActiveCfg = Release|Any CPU
{86EC5CB4-4EA2-40A2-8057-86542A0353BB}.Release|Any CPU.Build.0 = Release|Any CPU
- {86EC5CB4-4EA2-40A2-8057-86542A0353BB}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {86EC5CB4-4EA2-40A2-8057-86542A0353BB}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Debug|Any CPU.Build.0 = Debug|Any CPU
{A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Release|Any CPU.ActiveCfg = Release|Any CPU
{A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Release|Any CPU.Build.0 = Release|Any CPU
- {A654F3B8-E859-4E6A-B30D-227527DBEF0D}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {A654F3B8-E859-4E6A-B30D-227527DBEF0D}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{AA5E328A-8835-49D7-98ED-C29F2B3049F0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{AA5E328A-8835-49D7-98ED-C29F2B3049F0}.Debug|Any CPU.Build.0 = Debug|Any CPU
{AA5E328A-8835-49D7-98ED-C29F2B3049F0}.Release|Any CPU.ActiveCfg = Release|Any CPU
{AA5E328A-8835-49D7-98ED-C29F2B3049F0}.Release|Any CPU.Build.0 = Release|Any CPU
- {AA5E328A-8835-49D7-98ED-C29F2B3049F0}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {AA5E328A-8835-49D7-98ED-C29F2B3049F0}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{ADEBA147-80AE-4710-82E9-5B7F93690266}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{ADEBA147-80AE-4710-82E9-5B7F93690266}.Debug|Any CPU.Build.0 = Debug|Any CPU
{ADEBA147-80AE-4710-82E9-5B7F93690266}.Release|Any CPU.ActiveCfg = Release|Any CPU
{ADEBA147-80AE-4710-82E9-5B7F93690266}.Release|Any CPU.Build.0 = Release|Any CPU
- {ADEBA147-80AE-4710-82E9-5B7F93690266}.ReleaseSigned|Any CPU.ActiveCfg = Release|Any CPU
- {ADEBA147-80AE-4710-82E9-5B7F93690266}.ReleaseSigned|Any CPU.Build.0 = Release|Any CPU
{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.Debug|Any CPU.Build.0 = Debug|Any CPU
{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.Release|Any CPU.ActiveCfg = Release|Any CPU
{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.Release|Any CPU.Build.0 = Release|Any CPU
- {AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{B82B7DFE-7F7B-40EF-B3D6-064FF2B01294}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{B82B7DFE-7F7B-40EF-B3D6-064FF2B01294}.Debug|Any CPU.Build.0 = Debug|Any CPU
{B82B7DFE-7F7B-40EF-B3D6-064FF2B01294}.Release|Any CPU.ActiveCfg = Release|Any CPU
{B82B7DFE-7F7B-40EF-B3D6-064FF2B01294}.Release|Any CPU.Build.0 = Release|Any CPU
- {B82B7DFE-7F7B-40EF-B3D6-064FF2B01294}.ReleaseSigned|Any CPU.ActiveCfg = Release|Any CPU
- {B82B7DFE-7F7B-40EF-B3D6-064FF2B01294}.ReleaseSigned|Any CPU.Build.0 = Release|Any CPU
{B88F91D6-436D-4C78-8B99-47800FA8DE03}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{B88F91D6-436D-4C78-8B99-47800FA8DE03}.Debug|Any CPU.Build.0 = Debug|Any CPU
{B88F91D6-436D-4C78-8B99-47800FA8DE03}.Release|Any CPU.ActiveCfg = Release|Any CPU
{B88F91D6-436D-4C78-8B99-47800FA8DE03}.Release|Any CPU.Build.0 = Release|Any CPU
- {B88F91D6-436D-4C78-8B99-47800FA8DE03}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {B88F91D6-436D-4C78-8B99-47800FA8DE03}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{BF62FE08-373A-43D6-9D73-41CAA38B7011}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{BF62FE08-373A-43D6-9D73-41CAA38B7011}.Debug|Any CPU.Build.0 = Debug|Any CPU
{BF62FE08-373A-43D6-9D73-41CAA38B7011}.Release|Any CPU.ActiveCfg = Release|Any CPU
{BF62FE08-373A-43D6-9D73-41CAA38B7011}.Release|Any CPU.Build.0 = Release|Any CPU
- {BF62FE08-373A-43D6-9D73-41CAA38B7011}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {BF62FE08-373A-43D6-9D73-41CAA38B7011}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{C61154BA-DD4A-4838-8420-0162A28925E0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{C61154BA-DD4A-4838-8420-0162A28925E0}.Debug|Any CPU.Build.0 = Debug|Any CPU
{C61154BA-DD4A-4838-8420-0162A28925E0}.Release|Any CPU.ActiveCfg = Release|Any CPU
{C61154BA-DD4A-4838-8420-0162A28925E0}.Release|Any CPU.Build.0 = Release|Any CPU
- {C61154BA-DD4A-4838-8420-0162A28925E0}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {C61154BA-DD4A-4838-8420-0162A28925E0}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{CCC4440E-49F7-4790-B0AF-FEABB0837AE7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{CCC4440E-49F7-4790-B0AF-FEABB0837AE7}.Debug|Any CPU.Build.0 = Debug|Any CPU
{CCC4440E-49F7-4790-B0AF-FEABB0837AE7}.Release|Any CPU.ActiveCfg = Release|Any CPU
{CCC4440E-49F7-4790-B0AF-FEABB0837AE7}.Release|Any CPU.Build.0 = Release|Any CPU
- {CCC4440E-49F7-4790-B0AF-FEABB0837AE7}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {CCC4440E-49F7-4790-B0AF-FEABB0837AE7}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{F8C6D937-C44B-4EE3-A431-B0FBAEACE47D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{F8C6D937-C44B-4EE3-A431-B0FBAEACE47D}.Debug|Any CPU.Build.0 = Debug|Any CPU
{F8C6D937-C44B-4EE3-A431-B0FBAEACE47D}.Release|Any CPU.ActiveCfg = Release|Any CPU
{F8C6D937-C44B-4EE3-A431-B0FBAEACE47D}.Release|Any CPU.Build.0 = Release|Any CPU
- {F8C6D937-C44B-4EE3-A431-B0FBAEACE47D}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {F8C6D937-C44B-4EE3-A431-B0FBAEACE47D}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
EndGlobalSection
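Editor's note (not part of the patch): the regenerated C# stubs documented above expose every client call in two shapes, a convenience overload taking headers/deadline/cancellationToken and an explicit CallOptions overload that the convenience form delegates to. The sketch below is only an illustration of that relationship; the client type name (UnimplementedService.UnimplementedServiceClient), the target address, and the assumption that a test server is listening are editorial guesses, not taken from this diff.

using System;
using Grpc.Core;
using Grpc.Testing;

class OverloadSketch
{
    static void Main()
    {
        // Hypothetical endpoint; any running gRPC test server would do.
        var channel = new Channel("localhost:5050", ChannelCredentials.Insecure);
        var client = new UnimplementedService.UnimplementedServiceClient(channel);
        var deadline = DateTime.UtcNow.AddSeconds(5);

        // Convenience overload: headers/deadline/cancellationToken are wrapped
        // into a CallOptions internally, exactly as the generated code above shows.
        try { client.UnimplementedCall(new Empty(), deadline: deadline); }
        catch (RpcException e) { Console.WriteLine(e.Status.StatusCode); }

        // Equivalent explicit form using the CallOptions overload.
        var options = new CallOptions(deadline: deadline);
        try { client.UnimplementedCall(new Empty(), options); }
        catch (RpcException e) { Console.WriteLine(e.Status.StatusCode); }

        channel.ShutdownAsync().Wait();
    }
}

Since UnimplementedService is deliberately left unimplemented on the server, both calls are expected to surface an RpcException with StatusCode.Unimplemented; the point of the sketch is only the overload relationship, not the outcome.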
diff --git a/src/node/performance/worker_service_impl.js b/src/node/performance/worker_service_impl.js index 3f317f6429..38888a7219 100644 --- a/src/node/performance/worker_service_impl.js +++ b/src/node/performance/worker_service_impl.js @@ -55,9 +55,8 @@ module.exports = function WorkerServiceImpl(benchmark_impl, server) { } this.quitWorker = function quitWorker(call, callback) { - server.tryShutdown(function() { - callback(null, {}); - }); + callback(null, {}); + server.tryShutdown(function() {}); }; this.runClient = function runClient(call) { diff --git a/src/node/src/client.js b/src/node/src/client.js index 9c1562e8b8..134ef239c2 100644 --- a/src/node/src/client.js +++ b/src/node/src/client.js @@ -99,7 +99,18 @@ function ClientWritableStream(call, serialize) { function _write(chunk, encoding, callback) { /* jshint validthis: true */ var batch = {}; - var message = this.serialize(chunk); + var message; + try { + message = this.serialize(chunk); + } catch (e) { + /* Sending this error to the server and emitting it immediately on the + client may put the call in a slightly weird state on the client side, + but passing an object that causes a serialization failure is a misuse + of the API anyway, so that's OK. The primary purpose here is to give the + programmer a useful error and to stop the stream properly */ + this.call.cancelWithStatus(grpc.status.INTERNAL, "Serialization failure"); + callback(e); + } if (_.isFinite(encoding)) { /* Attach the encoding if it is a finite number. This is the closest we * can get to checking that it is valid flags */ @@ -184,14 +195,15 @@ function _emitStatusIfDone() { } else { status = this.received_status; } - this.emit('status', status); - if (status.code !== grpc.status.OK) { + if (status.code === grpc.status.OK) { + this.push(null); + } else { var error = new Error(status.details); error.code = status.code; error.metadata = status.metadata; this.emit('error', error); - return; } + this.emit('status', status); } } @@ -224,9 +236,11 @@ function _read(size) { } catch (e) { self._readsDone({code: grpc.status.INTERNAL, details: 'Failed to parse server response'}); + return; } if (data === null) { self._readsDone(); + return; } if (self.push(deserialized) && data !== null) { var read_batch = {}; @@ -396,6 +410,8 @@ function makeUnaryRequestFunction(method, serialize, deserialize) { var status = response.status; var error; var deserialized; + emitter.emit('metadata', Metadata._fromCoreRepresentation( + response.metadata)); if (status.code === grpc.status.OK) { if (err) { // Got a batch error, but OK status. 
Something went wrong @@ -423,8 +439,6 @@ function makeUnaryRequestFunction(method, serialize, deserialize) { args.callback(null, deserialized); } emitter.emit('status', status); - emitter.emit('metadata', Metadata._fromCoreRepresentation( - response.metadata)); }); return emitter; } diff --git a/src/node/src/server.js b/src/node/src/server.js index b3b414969a..da9c6b2d7f 100644 --- a/src/node/src/server.js +++ b/src/node/src/server.js @@ -127,7 +127,14 @@ function sendUnaryResponse(call, value, serialize, metadata, flags) { (new Metadata())._getCoreRepresentation(); call.metadataSent = true; } - var message = serialize(value); + var message; + try { + message = serialize(value); + } catch (e) { + e.code = grpc.status.INTERNAL; + handleError(e); + return; + } message.grpcWriteFlags = flags; end_batch[grpc.opType.SEND_MESSAGE] = message; end_batch[grpc.opType.SEND_STATUS_FROM_SERVER] = status; @@ -278,7 +285,14 @@ function _write(chunk, encoding, callback) { (new Metadata())._getCoreRepresentation(); this.call.metadataSent = true; } - var message = this.serialize(chunk); + var message; + try { + message = this.serialize(chunk); + } catch (e) { + e.code = grpc.status.INTERNAL; + callback(e); + return; + } if (_.isFinite(encoding)) { /* Attach the encoding if it is a finite number. This is the closest we * can get to checking that it is valid flags */ diff --git a/src/node/test/surface_test.js b/src/node/test/surface_test.js index 17c62d5635..e429a3648b 100644 --- a/src/node/test/surface_test.js +++ b/src/node/test/surface_test.js @@ -179,8 +179,8 @@ describe('Server.prototype.addProtoService', function() { call.on('data', function(value) { assert.fail('No messages expected'); }); - call.on('status', function(status) { - assert.strictEqual(status.code, grpc.status.UNIMPLEMENTED); + call.on('error', function(err) { + assert.strictEqual(err.code, grpc.status.UNIMPLEMENTED); done(); }); call.on('error', function(status) { /* Do nothing */ }); @@ -190,8 +190,8 @@ describe('Server.prototype.addProtoService', function() { call.on('data', function(value) { assert.fail('No messages expected'); }); - call.on('status', function(status) { - assert.strictEqual(status.code, grpc.status.UNIMPLEMENTED); + call.on('error', function(err) { + assert.strictEqual(err.code, grpc.status.UNIMPLEMENTED); done(); }); call.on('error', function(status) { /* Do nothing */ }); diff --git a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec index 6e594fd3ed..bcc2bb6126 100644 --- a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec +++ b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec @@ -36,7 +36,7 @@ Pod::Spec.new do |s| # exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed # before them. s.name = '!ProtoCompiler-gRPCPlugin' - v = '1.0.1' + v = '1.0.2' s.version = v s.summary = 'The gRPC ProtoC plugin generates Objective-C files from .proto services.' 
s.description = <<-DESC @@ -84,7 +84,10 @@ Pod::Spec.new do |s| repo = 'grpc/grpc' file = "grpc_objective_c_plugin-#{v}-macos-x86_64.zip" s.source = { - :http => "https://github.com/#{repo}/releases/download/v#{v}/#{file}", + # TODO(mxyan): Change back to "https://github.com/#{repo}/releases/download/v#{v}/#{file}" for + # next release + # :http => "https://github.com/#{repo}/releases/download/v#{v}/#{file}", + :http => "https://github.com/#{repo}/releases/download/objective-c-v#{v}/#{file}", # TODO(jcanizales): Add sha1 or sha256 # :sha1 => '??', } diff --git a/src/objective-c/GRPCClient/private/GRPCHost.m b/src/objective-c/GRPCClient/private/GRPCHost.m index 31065cbf01..450bec36e0 100644 --- a/src/objective-c/GRPCClient/private/GRPCHost.m +++ b/src/objective-c/GRPCClient/private/GRPCHost.m @@ -50,7 +50,7 @@ NS_ASSUME_NONNULL_BEGIN // TODO(jcanizales): Generate the version in a standalone header, from templates. Like // templates/src/core/surface/version.c.template . -#define GRPC_OBJC_VERSION_STRING @"1.0.1" +#define GRPC_OBJC_VERSION_STRING @"1.0.2" static NSMutableDictionary *kHostCache; diff --git a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m index 627b6aa86d..38fcae0299 100644 --- a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m +++ b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m @@ -112,7 +112,7 @@ } - (void)dealloc { - gpr_free(_op.data.send_message); + grpc_byte_buffer_destroy(_op.data.send_message); } @end diff --git a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m index 4a92cc8e0d..4ba7badd86 100644 --- a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m +++ b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m @@ -316,7 +316,8 @@ static char *roots_filename; } - (void)testInvokeLargeRequest { - [self testIndividualCase:"invoke_large_request"]; + // NOT SUPPORTED (frame size) + // [self testIndividualCase:"invoke_large_request"]; } - (void)testLargeMetadata { @@ -329,7 +330,8 @@ static char *roots_filename; } - (void)testMaxMessageLength { - [self testIndividualCase:"max_message_length"]; + // NOT SUPPORTED (close_error) + // [self testIndividualCase:"max_message_length"]; } - (void)testNegativeDeadline { diff --git a/src/objective-c/tests/GRPCClientTests.m b/src/objective-c/tests/GRPCClientTests.m index 77640525d5..0b72a75f3d 100644 --- a/src/objective-c/tests/GRPCClientTests.m +++ b/src/objective-c/tests/GRPCClientTests.m @@ -43,6 +43,8 @@ #import <RxLibrary/GRXWriteable.h> #import <RxLibrary/GRXWriter+Immediate.h> +#define TEST_TIMEOUT 16 + static NSString * const kHostAddress = @"localhost:5050"; static NSString * const kPackage = @"grpc.testing"; static NSString * const kService = @"TestService"; @@ -137,7 +139,7 @@ static GRPCProtoMethod *kUnaryCallMethod; [call startWithWriteable:responsesWriteable]; - [self waitForExpectationsWithTimeout:4 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)testEmptyRPC { @@ -159,7 +161,7 @@ static GRPCProtoMethod *kUnaryCallMethod; [call startWithWriteable:responsesWriteable]; - [self waitForExpectationsWithTimeout:8 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)testSimpleProtoRPC { @@ -191,7 +193,7 @@ static GRPCProtoMethod *kUnaryCallMethod; [call startWithWriteable:responsesWriteable]; - [self waitForExpectationsWithTimeout:8 handler:nil]; + [self 
waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)testMetadata { @@ -225,7 +227,7 @@ static GRPCProtoMethod *kUnaryCallMethod; [call startWithWriteable:responsesWriteable]; - [self waitForExpectationsWithTimeout:4 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)testResponseMetadataKVO { @@ -256,7 +258,7 @@ static GRPCProtoMethod *kUnaryCallMethod; [call startWithWriteable:responsesWriteable]; - [self waitForExpectationsWithTimeout:8 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)testUserAgentPrefix { @@ -287,7 +289,7 @@ static GRPCProtoMethod *kUnaryCallMethod; [call startWithWriteable:responsesWriteable]; - [self waitForExpectationsWithTimeout:8 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } // TODO(makarandd): Move to a different file that contains only unit tests @@ -347,7 +349,7 @@ static GRPCProtoMethod *kUnaryCallMethod; [call startWithWriteable:responsesWriteable]; - [self waitForExpectationsWithTimeout:8 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } @end diff --git a/src/objective-c/tests/InteropTests.m b/src/objective-c/tests/InteropTests.m index 9804734d6a..c3935ce1e0 100644 --- a/src/objective-c/tests/InteropTests.m +++ b/src/objective-c/tests/InteropTests.m @@ -46,6 +46,8 @@ #import <RxLibrary/GRXBufferedPipe.h> #import <RxLibrary/GRXWriter+Immediate.h> +#define TEST_TIMEOUT 32 + // Convenience constructors for the generated proto messages: @interface RMTStreamingOutputCallRequest (Constructors) @@ -124,7 +126,7 @@ [expectation fulfill]; }]; - [self waitForExpectationsWithTimeout:4 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)testLargeUnaryRPC { @@ -147,7 +149,7 @@ [expectation fulfill]; }]; - [self waitForExpectationsWithTimeout:16 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)test4MBResponsesAreAccepted { @@ -164,7 +166,7 @@ [expectation fulfill]; }]; - [self waitForExpectationsWithTimeout:16 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)testResponsesOverMaxSizeFailWithActionableMessage { @@ -185,7 +187,7 @@ [expectation fulfill]; }]; - [self waitForExpectationsWithTimeout:16 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)testResponsesOver4MBAreAcceptedIfOptedIn { @@ -205,7 +207,7 @@ [expectation fulfill]; }]; - [self waitForExpectationsWithTimeout:16 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)testClientStreamingRPC { @@ -238,7 +240,7 @@ [expectation fulfill]; }]; - [self waitForExpectationsWithTimeout:8 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)testServerStreamingRPC { @@ -275,7 +277,7 @@ } }]; - [self waitForExpectationsWithTimeout:8 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)testPingPongRPC { @@ -319,7 +321,7 @@ [expectation fulfill]; } }]; - [self waitForExpectationsWithTimeout:4 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } #ifndef GRPC_COMPILE_WITH_CRONET @@ -335,7 +337,7 @@ XCTAssert(done, @"Unexpected response: %@", response); [expectation fulfill]; }]; - [self waitForExpectationsWithTimeout:2 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } #endif @@ -361,7 +363,7 @@ [call cancel]; XCTAssertEqual(call.state, 
GRXWriterStateFinished); - [self waitForExpectationsWithTimeout:1 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)testCancelAfterFirstResponseRPC { @@ -396,7 +398,7 @@ } }]; [call start]; - [self waitForExpectationsWithTimeout:8 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } - (void)testRPCAfterClosingOpenConnections { @@ -420,7 +422,7 @@ }]; }]; - [self waitForExpectationsWithTimeout:4 handler:nil]; + [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil]; } @end diff --git a/src/php/lib/Grpc/AbstractCall.php b/src/php/lib/Grpc/AbstractCall.php index c4d56790f7..9f0b02b8bb 100644 --- a/src/php/lib/Grpc/AbstractCall.php +++ b/src/php/lib/Grpc/AbstractCall.php @@ -62,7 +62,7 @@ abstract class AbstractCall Channel $channel, $method, $deserialize, - $options = [] + array $options = [] ) { if (array_key_exists('timeout', $options) && is_numeric($timeout = $options['timeout']) @@ -89,7 +89,7 @@ abstract class AbstractCall } /** - * @return mixed The metadata sent by the server. + * @return mixed The metadata sent by the server */ public function getMetadata() { @@ -97,7 +97,7 @@ abstract class AbstractCall } /** - * @return mixed The trailing metadata sent by the server. + * @return mixed The trailing metadata sent by the server */ public function getTrailingMetadata() { @@ -105,7 +105,7 @@ abstract class AbstractCall } /** - * @return string The URI of the endpoint. + * @return string The URI of the endpoint */ public function getPeer() { @@ -167,8 +167,7 @@ abstract class AbstractCall /** * Set the CallCredentials for the underlying Call. * - * @param CallCredentials $call_credentials The CallCredentials - * object + * @param CallCredentials $call_credentials The CallCredentials object */ public function setCallCredentials($call_credentials) { diff --git a/src/php/lib/Grpc/BaseStub.php b/src/php/lib/Grpc/BaseStub.php index d0baeae955..aec60af094 100644 --- a/src/php/lib/Grpc/BaseStub.php +++ b/src/php/lib/Grpc/BaseStub.php @@ -48,14 +48,14 @@ class BaseStub private $update_metadata; /** - * @param $hostname string - * @param $opts array + * @param string $hostname + * @param array $opts * - 'update_metadata': (optional) a callback function which takes in a * metadata array, and returns an updated metadata array * - 'grpc.primary_user_agent': (optional) a user-agent string - * @param $channel Channel An already created Channel object + * @param Channel $channel An already created Channel object (optional) */ - public function __construct($hostname, $opts, $channel = null) + public function __construct($hostname, $opts, Channel $channel = null) { $ssl_roots = file_get_contents( dirname(__FILE__).'/../../../../etc/roots.pem'); @@ -98,7 +98,7 @@ class BaseStub } /** - * @return string The URI of the endpoint. + * @return string The URI of the endpoint */ public function getTarget() { @@ -106,7 +106,7 @@ class BaseStub } /** - * @param $try_to_connect bool + * @param bool $try_to_connect (optional) * * @return int The grpc connectivity state */ @@ -145,6 +145,12 @@ class BaseStub return $this->_checkConnectivityState($new_state); } + /** + * @param $new_state Connect state + * + * @return bool true if state is CHANNEL_READY + * @throw Exception if state is CHANNEL_FATAL_FAILURE + */ private function _checkConnectivityState($new_state) { if ($new_state == \Grpc\CHANNEL_READY) { @@ -167,6 +173,10 @@ class BaseStub /** * constructs the auth uri for the jwt. 
+ * + * @param string $method The method string + * + * @return string The URL string */ private function _get_jwt_aud_uri($method) { @@ -191,7 +201,7 @@ class BaseStub * * @param array $metadata The metadata map * - * @return $metadata Validated and key-normalized metadata map + * @return array $metadata Validated and key-normalized metadata map * @throw InvalidArgumentException if key contains invalid characters */ private function _validate_and_normalize_metadata($metadata) @@ -220,14 +230,16 @@ class BaseStub * @param mixed $argument The argument to the method * @param callable $deserialize A function that deserializes the response * @param array $metadata A metadata map to send to the server + * (optional) + * @param array $options An array of options (optional) * * @return SimpleSurfaceActiveCall The active call object */ public function _simpleRequest($method, $argument, $deserialize, - $metadata = [], - $options = []) + array $metadata = [], + array $options = []) { $call = new UnaryCall($this->channel, $method, @@ -251,17 +263,17 @@ class BaseStub * output. * * @param string $method The name of the method to call - * @param array $arguments An array or Traversable of arguments to stream to the - * server * @param callable $deserialize A function that deserializes the response * @param array $metadata A metadata map to send to the server + * (optional) + * @param array $options An array of options (optional) * * @return ClientStreamingSurfaceActiveCall The active call object */ public function _clientStreamRequest($method, callable $deserialize, - $metadata = [], - $options = []) + array $metadata = [], + array $options = []) { $call = new ClientStreamingCall($this->channel, $method, @@ -281,21 +293,23 @@ class BaseStub } /** - * Call a remote method that takes a single argument and returns a stream of - * responses. + * Call a remote method that takes a single argument and returns a stream + * of responses. * * @param string $method The name of the method to call * @param mixed $argument The argument to the method * @param callable $deserialize A function that deserializes the responses * @param array $metadata A metadata map to send to the server + * (optional) + * @param array $options An array of options (optional) * * @return ServerStreamingSurfaceActiveCall The active call object */ public function _serverStreamRequest($method, $argument, callable $deserialize, - $metadata = [], - $options = []) + array $metadata = [], + array $options = []) { $call = new ServerStreamingCall($this->channel, $method, @@ -320,13 +334,15 @@ class BaseStub * @param string $method The name of the method to call * @param callable $deserialize A function that deserializes the responses * @param array $metadata A metadata map to send to the server + * (optional) + * @param array $options An array of options (optional) * * @return BidiStreamingSurfaceActiveCall The active call object */ public function _bidiRequest($method, callable $deserialize, - $metadata = [], - $options = []) + array $metadata = [], + array $options = []) { $call = new BidiStreamingCall($this->channel, $method, diff --git a/src/php/lib/Grpc/BidiStreamingCall.php b/src/php/lib/Grpc/BidiStreamingCall.php index f0e1e811de..b03bbd204f 100644 --- a/src/php/lib/Grpc/BidiStreamingCall.php +++ b/src/php/lib/Grpc/BidiStreamingCall.php @@ -35,8 +35,8 @@ namespace Grpc; /** - * Represents an active call that allows for sending and recieving messages in - * streams in any order. 
+ * Represents an active call that allows for sending and recieving messages + * in streams in any order. */ class BidiStreamingCall extends AbstractCall { @@ -44,6 +44,7 @@ class BidiStreamingCall extends AbstractCall * Start the call. * * @param array $metadata Metadata to send with the call, if applicable + * (optional) */ public function start(array $metadata = []) { @@ -76,10 +77,10 @@ class BidiStreamingCall extends AbstractCall * writesDone is called. * * @param ByteBuffer $data The data to write - * @param array $options an array of options, possible keys: - * 'flags' => a number + * @param array $options An array of options, possible keys: + * 'flags' => a number (optional) */ - public function write($data, $options = []) + public function write($data, array $options = []) { $message_array = ['message' => $this->serializeMessage($data)]; if (array_key_exists('flags', $options)) { @@ -103,8 +104,8 @@ class BidiStreamingCall extends AbstractCall /** * Wait for the server to send the status, and return it. * - * @return \stdClass The status object, with integer $code, string $details, - * and array $metadata members + * @return \stdClass The status object, with integer $code, string + * $details, and array $metadata members */ public function getStatus() { diff --git a/src/php/lib/Grpc/ClientStreamingCall.php b/src/php/lib/Grpc/ClientStreamingCall.php index 20db809ea3..c542f08872 100644 --- a/src/php/lib/Grpc/ClientStreamingCall.php +++ b/src/php/lib/Grpc/ClientStreamingCall.php @@ -35,8 +35,8 @@ namespace Grpc; /** - * Represents an active call that sends a stream of messages and then gets a - * single response. + * Represents an active call that sends a stream of messages and then gets + * a single response. */ class ClientStreamingCall extends AbstractCall { @@ -44,8 +44,9 @@ class ClientStreamingCall extends AbstractCall * Start the call. * * @param array $metadata Metadata to send with the call, if applicable + * (optional) */ - public function start($metadata = []) + public function start(array $metadata = []) { $this->call->startBatch([ OP_SEND_INITIAL_METADATA => $metadata, @@ -57,8 +58,8 @@ class ClientStreamingCall extends AbstractCall * wait is called. * * @param ByteBuffer $data The data to write - * @param array $options an array of options, possible keys: - * 'flags' => a number + * @param array $options An array of options, possible keys: + * 'flags' => a number (optional) */ public function write($data, array $options = []) { diff --git a/src/php/lib/Grpc/ServerStreamingCall.php b/src/php/lib/Grpc/ServerStreamingCall.php index 5aeeafa94a..406512bf57 100644 --- a/src/php/lib/Grpc/ServerStreamingCall.php +++ b/src/php/lib/Grpc/ServerStreamingCall.php @@ -35,8 +35,8 @@ namespace Grpc; /** - * Represents an active call that sends a single message and then gets a stream - * of responses. + * Represents an active call that sends a single message and then gets a + * stream of responses. 
*/ class ServerStreamingCall extends AbstractCall { @@ -45,10 +45,11 @@ class ServerStreamingCall extends AbstractCall * * @param mixed $data The data to send * @param array $metadata Metadata to send with the call, if applicable - * @param array $options an array of options, possible keys: - * 'flags' => a number + * (optional) + * @param array $options An array of options, possible keys: + * 'flags' => a number (optional) */ - public function start($data, $metadata = [], $options = []) + public function start($data, array $metadata = [], array $options = []) { $message_array = ['message' => $this->serializeMessage($data)]; if (array_key_exists('flags', $options)) { @@ -82,8 +83,8 @@ class ServerStreamingCall extends AbstractCall /** * Wait for the server to send the status, and return it. * - * @return \stdClass The status object, with integer $code, string $details, - * and array $metadata members + * @return \stdClass The status object, with integer $code, string + * $details, and array $metadata members */ public function getStatus() { diff --git a/src/php/lib/Grpc/UnaryCall.php b/src/php/lib/Grpc/UnaryCall.php index e8eb6487a8..3c1cb158ea 100644 --- a/src/php/lib/Grpc/UnaryCall.php +++ b/src/php/lib/Grpc/UnaryCall.php @@ -35,8 +35,8 @@ namespace Grpc; /** - * Represents an active call that sends a single message and then gets a single - * response. + * Represents an active call that sends a single message and then gets a + * single response. */ class UnaryCall extends AbstractCall { @@ -45,10 +45,11 @@ class UnaryCall extends AbstractCall * * @param mixed $data The data to send * @param array $metadata Metadata to send with the call, if applicable - * @param array $options an array of options, possible keys: - * 'flags' => a number + * (optional) + * @param array $options An array of options, possible keys: + * 'flags' => a number (optional) */ - public function start($data, $metadata = [], $options = []) + public function start($data, array $metadata = [], array $options = []) { $message_array = ['message' => $this->serializeMessage($data)]; if (isset($options['flags'])) { diff --git a/src/python/grpcio/commands.py b/src/python/grpcio/commands.py index ea3b6f3391..701c6af017 100644 --- a/src/python/grpcio/commands.py +++ b/src/python/grpcio/commands.py @@ -62,6 +62,7 @@ napoleon_numpy_docstring = True napoleon_include_special_with_doc = True html_theme = 'sphinx_rtd_theme' +copyright = "2016, The gRPC Authors" """ API_GLOSSARY = """ diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py index 6087276d51..e3c10156d0 100644 --- a/src/python/grpcio/grpc/__init__.py +++ b/src/python/grpcio/grpc/__init__.py @@ -31,6 +31,7 @@ import abc import enum +import sys import six @@ -767,8 +768,8 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)): gRPC runtime to determine the status code of the RPC. Args: - code: The integer status code of the RPC to be transmitted to the - invocation side of the RPC. + code: A StatusCode value to be transmitted to the invocation side of the + RPC as the status code of the RPC. """ raise NotImplementedError() @@ -780,8 +781,8 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)): details to transmit. Args: - details: The details string of the RPC to be transmitted to - the invocation side of the RPC. + details: A string to be transmitted to the invocation side of the RPC as + the status details of the RPC. 
""" raise NotImplementedError() @@ -849,6 +850,26 @@ class GenericRpcHandler(six.with_metaclass(abc.ABCMeta)): raise NotImplementedError() +class ServiceRpcHandler(six.with_metaclass(abc.ABCMeta, GenericRpcHandler)): + """An implementation of RPC methods belonging to a service. + + A service handles RPC methods with structured names of the form + '/Service.Name/Service.MethodX', where 'Service.Name' is the value + returned by service_name(), and 'Service.MethodX' is the service method + name. A service can have multiple service methods names, but only a single + service name. + """ + + @abc.abstractmethod + def service_name(self): + """Returns this services name. + + Returns: + The service name. + """ + raise NotImplementedError() + + ############################# Server Interface ############################### @@ -905,21 +926,6 @@ class Server(six.with_metaclass(abc.ABCMeta)): raise NotImplementedError() @abc.abstractmethod - def add_shutdown_handler(self, shutdown_handler): - """Adds a handler to be called on server shutdown. - - Shutdown handlers are run on server stop() or in the event that a running - server is destroyed unexpectedly. The handlers are run in series before - the stop grace period. - - Args: - shutdown_handler: A function taking a single arg, a time in seconds - within which the handler should complete. None indicates the handler can - run for any duration. - """ - raise NotImplementedError() - - @abc.abstractmethod def start(self): """Starts this Server's service of RPCs. @@ -929,7 +935,7 @@ class Server(six.with_metaclass(abc.ABCMeta)): raise NotImplementedError() @abc.abstractmethod - def stop(self, grace, shutdown_handler_grace=None): + def stop(self, grace): """Stops this Server's service of RPCs. All calls to this method immediately stop service of new RPCs. When existing @@ -952,8 +958,6 @@ class Server(six.with_metaclass(abc.ABCMeta)): aborted by this Server's stopping. If None, all RPCs will be aborted immediately and this method will block until this Server is completely stopped. - shutdown_handler_grace: A duration of time in seconds or None. This - value is passed to all shutdown handlers. Returns: A threading.Event that will be set when this Server has completely @@ -1248,8 +1252,7 @@ def secure_channel(target, credentials, options=None): credentials._credentials) -def server(thread_pool, handlers=None, options=None, exit_grace=None, - exit_shutdown_handler_grace=None): +def server(thread_pool, handlers=None, options=None): """Creates a Server with which RPCs can be serviced. Args: @@ -1262,19 +1265,13 @@ def server(thread_pool, handlers=None, options=None, exit_grace=None, returned Server is started. options: A sequence of string-value pairs according to which to configure the created server. - exit_grace: The grace period to use when terminating - running servers at interpreter exit. None indicates unspecified. - exit_shutdown_handler_grace: The shutdown handler grace to use when - terminating running servers at interpreter exit. None indicates - unspecified. Returns: A Server with which RPCs can be serviced. 
""" from grpc import _server return _server.Server(thread_pool, () if handlers is None else handlers, - () if options is None else options, exit_grace, - exit_shutdown_handler_grace) + () if options is None else options) ################################### __all__ ################################# @@ -1304,6 +1301,7 @@ __all__ = ( 'RpcMethodHandler', 'HandlerCallDetails', 'GenericRpcHandler', + 'ServiceRpcHandler', 'Server', 'unary_unary_rpc_method_handler', 'unary_stream_rpc_method_handler', @@ -1321,3 +1319,24 @@ __all__ = ( 'secure_channel', 'server', ) + + +############################### Extension Shims ################################ + + +# Here to maintain backwards compatibility; avoid using these in new code! +try: + import grpc_tools + sys.modules.update({'grpc.tools': grpc_tools}) +except ImportError: + pass +try: + import grpc_health + sys.modules.update({'grpc.health': grpc_health}) +except ImportError: + pass +try: + import grpc_reflection + sys.modules.update({'grpc.reflection': grpc_reflection}) +except ImportError: + pass diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py index 53a26727ab..41e9163cd6 100644 --- a/src/python/grpcio/grpc/_channel.py +++ b/src/python/grpcio/grpc/_channel.py @@ -36,8 +36,8 @@ import time import grpc from grpc import _common from grpc import _grpcio_metadata -from grpc.framework.foundation import callable_util from grpc._cython import cygrpc +from grpc.framework.foundation import callable_util _USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__) @@ -99,6 +99,22 @@ def _wait_once_until(condition, until): else: condition.wait(timeout=remaining) +_INTERNAL_CALL_ERROR_MESSAGE_FORMAT = ( + 'Internal gRPC call error %d. ' + + 'Please report to https://github.com/grpc/grpc/issues') + +def _check_call_error(call_error, metadata): + if call_error == cygrpc.CallError.invalid_metadata: + raise ValueError('metadata was invalid: %s' % metadata) + elif call_error != cygrpc.CallError.ok: + raise ValueError(_INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error) + +def _call_error_set_RPCstate(state, call_error, metadata): + if call_error == cygrpc.CallError.invalid_metadata: + _abort(state, grpc.StatusCode.INTERNAL, 'metadata was invalid: %s' % metadata) + else: + _abort(state, grpc.StatusCode.INTERNAL, + _INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error) class _RPCState(object): @@ -358,7 +374,7 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): if self._state.callbacks is None: return False else: - self._state.callbacks.append(lambda: callback()) + self._state.callbacks.append(callback) return True def initial_metadata(self): @@ -435,10 +451,10 @@ def _end_unary_response_blocking(state, with_call, deadline): class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): def __init__( - self, channel, create_managed_call, method, request_serializer, + self, channel, managed_call, method, request_serializer, response_deserializer): self._channel = channel - self._create_managed_call = create_managed_call + self._managed_call = managed_call self._method = method self._request_serializer = request_serializer self._response_deserializer = response_deserializer @@ -472,7 +488,8 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): None, 0, completion_queue, self._method, None, deadline_timespec) if credentials is not None: call.set_credentials(credentials._credentials) - call.start_client_batch(cygrpc.Operations(operations), None) + call_error = call.start_client_batch(cygrpc.Operations(operations), 
None) + _check_call_error(call_error, metadata) _handle_event(completion_queue.poll(), state, self._response_deserializer) return state, deadline @@ -490,23 +507,28 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): if rendezvous: return rendezvous else: - call = self._create_managed_call( + call, drive_call = self._managed_call( None, 0, self._method, None, deadline_timespec) if credentials is not None: call.set_credentials(credentials._credentials) event_handler = _event_handler(state, call, self._response_deserializer) with state.condition: - call.start_client_batch(cygrpc.Operations(operations), event_handler) + call_error = call.start_client_batch(cygrpc.Operations(operations), + event_handler) + if call_error != cygrpc.CallError.ok: + _call_error_set_RPCstate(state, call_error, metadata) + return _Rendezvous(state, None, None, deadline) + drive_call() return _Rendezvous(state, call, self._response_deserializer, deadline) class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable): def __init__( - self, channel, create_managed_call, method, request_serializer, + self, channel, managed_call, method, request_serializer, response_deserializer): self._channel = channel - self._create_managed_call = create_managed_call + self._managed_call = managed_call self._method = method self._request_serializer = request_serializer self._response_deserializer = response_deserializer @@ -518,7 +540,7 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable): raise rendezvous else: state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None) - call = self._create_managed_call( + call, drive_call = self._managed_call( None, 0, self._method, None, deadline_timespec) if credentials is not None: call.set_credentials(credentials._credentials) @@ -535,17 +557,22 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable): cygrpc.operation_send_close_from_client(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_client_batch(cygrpc.Operations(operations), event_handler) + call_error = call.start_client_batch(cygrpc.Operations(operations), + event_handler) + if call_error != cygrpc.CallError.ok: + _call_error_set_RPCstate(state, call_error, metadata) + return _Rendezvous(state, None, None, deadline) + drive_call() return _Rendezvous(state, call, self._response_deserializer, deadline) class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): def __init__( - self, channel, create_managed_call, method, request_serializer, + self, channel, managed_call, method, request_serializer, response_deserializer): self._channel = channel - self._create_managed_call = create_managed_call + self._managed_call = managed_call self._method = method self._request_serializer = request_serializer self._response_deserializer = response_deserializer @@ -569,7 +596,8 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): cygrpc.operation_receive_message(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_client_batch(cygrpc.Operations(operations), None) + call_error = call.start_client_batch(cygrpc.Operations(operations), None) + _check_call_error(call_error, metadata) _consume_request_iterator( request_iterator, state, call, self._request_serializer) while True: @@ -597,7 +625,7 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): self, request_iterator, timeout=None, metadata=None, credentials=None): deadline, deadline_timespec = _deadline(timeout) state = _RPCState(_STREAM_UNARY_INITIAL_DUE, 
None, None, None, None) - call = self._create_managed_call( + call, drive_call = self._managed_call( None, 0, self._method, None, deadline_timespec) if credentials is not None: call.set_credentials(credentials._credentials) @@ -613,7 +641,12 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): cygrpc.operation_receive_message(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_client_batch(cygrpc.Operations(operations), event_handler) + call_error = call.start_client_batch(cygrpc.Operations(operations), + event_handler) + if call_error != cygrpc.CallError.ok: + _call_error_set_RPCstate(state, call_error, metadata) + return _Rendezvous(state, None, None, deadline) + drive_call() _consume_request_iterator( request_iterator, state, call, self._request_serializer) return _Rendezvous(state, call, self._response_deserializer, deadline) @@ -622,10 +655,10 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable): def __init__( - self, channel, create_managed_call, method, request_serializer, + self, channel, managed_call, method, request_serializer, response_deserializer): self._channel = channel - self._create_managed_call = create_managed_call + self._managed_call = managed_call self._method = method self._request_serializer = request_serializer self._response_deserializer = response_deserializer @@ -634,7 +667,7 @@ class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable): self, request_iterator, timeout=None, metadata=None, credentials=None): deadline, deadline_timespec = _deadline(timeout) state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None) - call = self._create_managed_call( + call, drive_call = self._managed_call( None, 0, self._method, None, deadline_timespec) if credentials is not None: call.set_credentials(credentials._credentials) @@ -649,7 +682,12 @@ class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable): _common.cygrpc_metadata(metadata), _EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_client_batch(cygrpc.Operations(operations), event_handler) + call_error = call.start_client_batch(cygrpc.Operations(operations), + event_handler) + if call_error != cygrpc.CallError.ok: + _call_error_set_RPCstate(state, call_error, metadata) + return _Rendezvous(state, None, None, deadline) + drive_call() _consume_request_iterator( request_iterator, state, call, self._request_serializer) return _Rendezvous(state, call, self._response_deserializer, deadline) @@ -687,16 +725,13 @@ def _run_channel_spin_thread(state): channel_spin_thread.start() -def _create_channel_managed_call(state): - def create_channel_managed_call(parent, flags, method, host, deadline): - """Creates a managed cygrpc.Call. +def _channel_managed_call_management(state): + def create(parent, flags, method, host, deadline): + """Creates a managed cygrpc.Call and a function to call to drive it. - Callers of this function must conduct at least one operation on the returned - call. The tags associated with operations conducted on the returned call - must be no-argument callables that return None to indicate that this channel - should continue polling for events associated with the call and return the - call itself to indicate that no more events associated with the call will be - generated. + If operations are successfully added to the returned cygrpc.Call, the + returned function must be called. 
If operations are not successfully added + to the returned cygrpc.Call, the returned function must not be called. Args: parent: A cygrpc.Call to be used as the parent of the created call. @@ -706,18 +741,22 @@ def _create_channel_managed_call(state): deadline: A cygrpc.Timespec to be the deadline of the created call. Returns: - A cygrpc.Call with which to conduct an RPC. + A cygrpc.Call with which to conduct an RPC and a function to call if + operations are successfully started on the call. """ - with state.lock: - call = state.channel.create_call( - parent, flags, state.completion_queue, method, host, deadline) - if state.managed_calls is None: - state.managed_calls = set((call,)) - _run_channel_spin_thread(state) - else: - state.managed_calls.add(call) - return call - return create_channel_managed_call + call = state.channel.create_call( + parent, flags, state.completion_queue, method, host, deadline) + + def drive(): + with state.lock: + if state.managed_calls is None: + state.managed_calls = set((call,)) + _run_channel_spin_thread(state) + else: + state.managed_calls.add(call) + + return call, drive + return create class _ChannelConnectivityState(object): @@ -847,6 +886,7 @@ def _options(options): class Channel(grpc.Channel): + """A cygrpc.Channel-backed implementation of grpc.Channel.""" def __init__(self, target, options, credentials): """Constructor. @@ -871,25 +911,25 @@ class Channel(grpc.Channel): def unary_unary( self, method, request_serializer=None, response_deserializer=None): return _UnaryUnaryMultiCallable( - self._channel, _create_channel_managed_call(self._call_state), + self._channel, _channel_managed_call_management(self._call_state), _common.encode(method), request_serializer, response_deserializer) def unary_stream( self, method, request_serializer=None, response_deserializer=None): return _UnaryStreamMultiCallable( - self._channel, _create_channel_managed_call(self._call_state), + self._channel, _channel_managed_call_management(self._call_state), _common.encode(method), request_serializer, response_deserializer) def stream_unary( self, method, request_serializer=None, response_deserializer=None): return _StreamUnaryMultiCallable( - self._channel, _create_channel_managed_call(self._call_state), + self._channel, _channel_managed_call_management(self._call_state), _common.encode(method), request_serializer, response_deserializer) def stream_stream( self, method, request_serializer=None, response_deserializer=None): return _StreamStreamMultiCallable( - self._channel, _create_channel_managed_call(self._call_state), + self._channel, _channel_managed_call_management(self._call_state), _common.encode(method), request_serializer, response_deserializer) def __del__(self): diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py index d83a2e6ded..5223712dfa 100644 --- a/src/python/grpcio/grpc/_server.py +++ b/src/python/grpcio/grpc/_server.py @@ -60,8 +60,7 @@ _CANCELLED = 'cancelled' _EMPTY_FLAGS = 0 _EMPTY_METADATA = cygrpc.Metadata(()) -_DEFAULT_EXIT_GRACE = 1.0 -_DEFAULT_EXIT_SHUTDOWN_HANDLER_GRACE = 5.0 +_UNEXPECTED_EXIT_SERVER_GRACE = 1.0 def _serialized_request(request_event): @@ -596,18 +595,14 @@ class _ServerStage(enum.Enum): class _ServerState(object): - def __init__(self, completion_queue, server, generic_handlers, thread_pool, - exit_grace, exit_shutdown_handler_grace): + def __init__(self, completion_queue, server, generic_handlers, thread_pool): self.lock = threading.Lock() self.completion_queue = completion_queue self.server = server 
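The create/drive contract documented in _channel_managed_call_management above is easier to see in isolation. Below is a minimal, self-contained Python sketch of the same idea; the names (managed_call_management, the plain dict standing in for channel state, the operations_started flag) are illustrative stand-ins, not the real cygrpc-backed objects, and no channel spin thread is actually started.

import threading

def managed_call_management(state):
    # Toy analogue of _channel_managed_call_management: create the call
    # eagerly, but defer registering it for event polling until the caller
    # confirms that operations were successfully started on it.
    def create(method):
        call = object()  # stand-in for state.channel.create_call(..., method, ...)

        def drive():
            with state['lock']:
                if state['managed_calls'] is None:
                    state['managed_calls'] = set((call,))
                    # the real code would start the channel spin thread here
                else:
                    state['managed_calls'].add(call)

        return call, drive

    return create

state = {'lock': threading.Lock(), 'managed_calls': None}
create = managed_call_management(state)
call, drive_call = create('/test/Method')
operations_started = True  # stand-in for start_client_batch(...) == CallError.ok
if operations_started:
    drive_call()  # must be called only when operations were accepted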
self.generic_handlers = list(generic_handlers) self.thread_pool = thread_pool - self.exit_grace = exit_grace - self.exit_shutdown_handler_grace = exit_shutdown_handler_grace self.stage = _ServerStage.STOPPED self.shutdown_events = None - self.shutdown_handlers = [] # TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields. self.rpc_states = set() @@ -677,45 +672,41 @@ def _serve(state): return -def _stop(state, grace, shutdown_handler_grace): - shutdown_event = threading.Event() - - def cancel_all_calls_after_grace(): - with state.lock: - if state.stage is _ServerStage.STOPPED: - shutdown_event.set() - return - elif state.stage is _ServerStage.STARTED: - do_shutdown = True - state.stage = _ServerStage.GRACE - state.shutdown_events = [] - else: - do_shutdown = False - state.shutdown_events.append(shutdown_event) - - if do_shutdown: - # Run Shutdown Handlers without the lock - for handler in state.shutdown_handlers: - handler(shutdown_handler_grace) - with state.lock: +def _stop(state, grace): + with state.lock: + if state.stage is _ServerStage.STOPPED: + shutdown_event = threading.Event() + shutdown_event.set() + return shutdown_event + else: + if state.stage is _ServerStage.STARTED: state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG) state.stage = _ServerStage.GRACE + state.shutdown_events = [] state.due.add(_SHUTDOWN_TAG) - - if not shutdown_event.wait(timeout=grace): - with state.lock: + shutdown_event = threading.Event() + state.shutdown_events.append(shutdown_event) + if grace is None: state.server.cancel_all_calls() # TODO(https://github.com/grpc/grpc/issues/6597): delete this loop. for rpc_state in state.rpc_states: with rpc_state.condition: rpc_state.client = _CANCELLED rpc_state.condition.notify_all() - - if grace is None: - cancel_all_calls_after_grace() - else: - threading.Thread(target=cancel_all_calls_after_grace).start() - + else: + def cancel_all_calls_after_grace(): + shutdown_event.wait(timeout=grace) + with state.lock: + state.server.cancel_all_calls() + # TODO(https://github.com/grpc/grpc/issues/6597): delete this loop. 
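For callers of the public API, the net effect of the reworked _stop is unchanged in shape: grpc.Server.stop(grace) still returns a threading.Event that is set once shutdown completes, with grace=None cancelling in-flight RPCs immediately and a numeric grace giving them that many seconds to finish. A minimal usage sketch (the port and grace value are arbitrary):

from concurrent import futures
import grpc

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
server.add_insecure_port('[::]:50051')
server.start()

# Request shutdown, allowing in-flight RPCs up to 5 seconds to complete;
# the returned threading.Event is set once shutdown has finished.
shutdown_event = server.stop(5.0)
shutdown_event.wait()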
+ for rpc_state in state.rpc_states: + with rpc_state.condition: + rpc_state.client = _CANCELLED + rpc_state.condition.notify_all() + thread = threading.Thread(target=cancel_all_calls_after_grace) + thread.start() + return shutdown_event + shutdown_event.wait() return shutdown_event @@ -725,12 +716,12 @@ def _start(state): raise ValueError('Cannot start already-started server!') state.server.start() state.stage = _ServerStage.STARTED - _request_call(state) + _request_call(state) def cleanup_server(timeout): if timeout is None: - _stop(state, state.exit_grace, state.exit_shutdown_handler_grace).wait() + _stop(state, _UNEXPECTED_EXIT_SERVER_GRACE).wait() else: - _stop(state, timeout, 0).wait() + _stop(state, timeout).wait() thread = _common.CleanupThread( cleanup_server, target=_serve, args=(state,)) @@ -738,16 +729,12 @@ def _start(state): class Server(grpc.Server): - def __init__(self, thread_pool, generic_handlers, options, exit_grace, - exit_shutdown_handler_grace): + def __init__(self, thread_pool, generic_handlers, options): completion_queue = cygrpc.CompletionQueue() server = cygrpc.Server(_common.channel_args(options)) server.register_completion_queue(completion_queue) self._state = _ServerState( - completion_queue, server, generic_handlers, thread_pool, - _DEFAULT_EXIT_GRACE if exit_grace is None else exit_grace, - _DEFAULT_EXIT_SHUTDOWN_HANDLER_GRACE if exit_shutdown_handler_grace - is None else exit_shutdown_handler_grace) + completion_queue, server, generic_handlers, thread_pool) def add_generic_rpc_handlers(self, generic_rpc_handlers): _add_generic_handlers(self._state, generic_rpc_handlers) @@ -758,14 +745,11 @@ class Server(grpc.Server): def add_secure_port(self, address, server_credentials): return _add_secure_port(self._state, _common.encode(address), server_credentials) - def add_shutdown_handler(self, handler): - self._state.shutdown_handlers.append(handler) - def start(self): _start(self._state) - def stop(self, grace, shutdown_handler_grace=None): - return _stop(self._state, grace, shutdown_handler_grace) + def stop(self, grace): + return _stop(self._state, grace) def __del__(self): - _stop(self._state, None, None) + _stop(self._state, None) diff --git a/src/python/grpcio/grpc/_utilities.py b/src/python/grpcio/grpc/_utilities.py index 4850967fbc..a375896e6e 100644 --- a/src/python/grpcio/grpc/_utilities.py +++ b/src/python/grpcio/grpc/_utilities.py @@ -53,13 +53,17 @@ class RpcMethodHandler( pass -class DictionaryGenericHandler(grpc.GenericRpcHandler): +class DictionaryGenericHandler(grpc.ServiceRpcHandler): def __init__(self, service, method_handlers): + self._name = service self._method_handlers = { _common.fully_qualified_method(service, method): method_handler for method, method_handler in six.iteritems(method_handlers)} + def service_name(self): + return self._name + def service(self, handler_call_details): return self._method_handlers.get(handler_call_details.method) diff --git a/src/python/grpcio/support.py b/src/python/grpcio/support.py index f363f5fdc5..b226e690fd 100644 --- a/src/python/grpcio/support.py +++ b/src/python/grpcio/support.py @@ -100,9 +100,15 @@ def diagnose_compile_error(build_ext, error): .format(source) ) +def diagnose_attribute_error(build_ext, error): + if any('_needs_stub' in arg for arg in error.args): + raise commands.CommandError( + "We expect a missing `_needs_stub` attribute from older versions of " + "setuptools. 
Consider upgrading setuptools.") _ERROR_DIAGNOSES = { - errors.CompileError: diagnose_compile_error + errors.CompileError: diagnose_compile_error, + AttributeError: diagnose_attribute_error } def diagnose_build_ext_error(build_ext, error, formatted): diff --git a/src/python/grpcio_health_checking/MANIFEST.in b/src/python/grpcio_health_checking/MANIFEST.in index 7407f646d1..5255e4c403 100644 --- a/src/python/grpcio_health_checking/MANIFEST.in +++ b/src/python/grpcio_health_checking/MANIFEST.in @@ -1,4 +1,4 @@ include grpc_version.py include health_commands.py -graft grpc +graft grpc_health global-exclude *.pyc diff --git a/src/python/grpcio_health_checking/grpc/health/__init__.py b/src/python/grpcio_health_checking/grpc_health/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio_health_checking/grpc/health/__init__.py +++ b/src/python/grpcio_health_checking/grpc_health/__init__.py diff --git a/src/python/grpcio_health_checking/grpc/health/v1/__init__.py b/src/python/grpcio_health_checking/grpc_health/v1/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio_health_checking/grpc/health/v1/__init__.py +++ b/src/python/grpcio_health_checking/grpc_health/v1/__init__.py diff --git a/src/python/grpcio_health_checking/grpc/health/v1/health.py b/src/python/grpcio_health_checking/grpc_health/v1/health.py index 8108ac1096..0df679b0e2 100644 --- a/src/python/grpcio_health_checking/grpc/health/v1/health.py +++ b/src/python/grpcio_health_checking/grpc_health/v1/health.py @@ -33,7 +33,7 @@ import threading import grpc -from grpc.health.v1 import health_pb2 +from grpc_health.v1 import health_pb2 class HealthServicer(health_pb2.HealthServicer): diff --git a/src/python/grpcio_health_checking/health_commands.py b/src/python/grpcio_health_checking/health_commands.py index 66df25da63..0c420a655f 100644 --- a/src/python/grpcio_health_checking/health_commands.py +++ b/src/python/grpcio_health_checking/health_commands.py @@ -54,7 +54,7 @@ class CopyProtoModules(setuptools.Command): if os.path.isfile(HEALTH_PROTO): shutil.copyfile( HEALTH_PROTO, - os.path.join(ROOT_DIR, 'grpc/health/v1/health.proto')) + os.path.join(ROOT_DIR, 'grpc_health/v1/health.proto')) class BuildPackageProtos(setuptools.Command): @@ -74,5 +74,5 @@ class BuildPackageProtos(setuptools.Command): # directory is provided as an 'include' directory. We assume it's the '' key # to `self.distribution.package_dir` (and get a key error if it's not # there). 
- from grpc.tools import command + from grpc_tools import command command.build_package_protos(self.distribution.package_dir['']) diff --git a/src/python/grpcio_health_checking/setup.py b/src/python/grpcio_health_checking/setup.py index 8c92ee16a9..e88f389ba8 100644 --- a/src/python/grpcio_health_checking/setup.py +++ b/src/python/grpcio_health_checking/setup.py @@ -66,7 +66,6 @@ setuptools.setup( license='3-clause BSD', package_dir=PACKAGE_DIRECTORIES, packages=setuptools.find_packages('.'), - namespace_packages=['grpc'], install_requires=INSTALL_REQUIRES, setup_requires=SETUP_REQUIRES, cmdclass=COMMAND_CLASS diff --git a/src/python/grpcio_reflection/grpc/reflection/__init__.py b/src/python/grpcio_reflection/grpc_reflection/__init__.py index d5ad73a74a..d5ad73a74a 100644 --- a/src/python/grpcio_reflection/grpc/reflection/__init__.py +++ b/src/python/grpcio_reflection/grpc_reflection/__init__.py diff --git a/src/python/grpcio_reflection/grpc/reflection/v1alpha/__init__.py b/src/python/grpcio_reflection/grpc_reflection/v1alpha/__init__.py index d5ad73a74a..d5ad73a74a 100644 --- a/src/python/grpcio_reflection/grpc/reflection/v1alpha/__init__.py +++ b/src/python/grpcio_reflection/grpc_reflection/v1alpha/__init__.py diff --git a/src/python/grpcio_reflection/grpc/reflection/v1alpha/reflection.py b/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py index 3c399b0d79..bfcbce8e04 100644 --- a/src/python/grpcio_reflection/grpc/reflection/v1alpha/reflection.py +++ b/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py @@ -35,7 +35,7 @@ import grpc from google.protobuf import descriptor_pb2 from google.protobuf import descriptor_pool -from grpc.reflection.v1alpha import reflection_pb2 +from grpc_reflection.v1alpha import reflection_pb2 _POOL = descriptor_pool.Default() diff --git a/src/python/grpcio_reflection/reflection_commands.py b/src/python/grpcio_reflection/reflection_commands.py index d189aee577..dee5491e0a 100644 --- a/src/python/grpcio_reflection/reflection_commands.py +++ b/src/python/grpcio_reflection/reflection_commands.py @@ -54,7 +54,7 @@ class CopyProtoModules(setuptools.Command): if os.path.isfile(HEALTH_PROTO): shutil.copyfile( HEALTH_PROTO, - os.path.join(ROOT_DIR, 'grpc/reflection/v1alpha/reflection.proto')) + os.path.join(ROOT_DIR, 'grpc_reflection/v1alpha/reflection.proto')) class BuildPackageProtos(setuptools.Command): @@ -74,5 +74,5 @@ class BuildPackageProtos(setuptools.Command): # directory is provided as an 'include' directory. We assume it's the '' key # to `self.distribution.package_dir` (and get a key error if it's not # there). 
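The import below switches to the renamed grpc_tools distribution instead of grpc.tools. Outside of setup.py, the same entry points can be driven directly; a small sketch, where my_pkg/ and health.proto are placeholder paths:

from grpc_tools import command, protoc

# Compile every .proto found under the package root, as these setup.py
# commands do via build_package_protos.
command.build_package_protos('my_pkg')

# Or drive protoc explicitly for one file; the first list element plays
# the role of the program name in the argument vector.
protoc.main([
    'grpc_tools.protoc',
    '-I.',
    '--python_out=.',
    '--grpc_python_out=.',
    'my_pkg/health.proto',
])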
- from grpc.tools import command + from grpc_tools import command command.build_package_protos(self.distribution.package_dir['']) diff --git a/src/python/grpcio_reflection/setup.py b/src/python/grpcio_reflection/setup.py index df95af4de1..cfc41f4fe7 100644 --- a/src/python/grpcio_reflection/setup.py +++ b/src/python/grpcio_reflection/setup.py @@ -66,7 +66,6 @@ setuptools.setup( license='3-clause BSD', package_dir=PACKAGE_DIRECTORIES, packages=setuptools.find_packages('.'), - namespace_packages=['grpc'], install_requires=INSTALL_REQUIRES, setup_requires=SETUP_REQUIRES, cmdclass=COMMAND_CLASS diff --git a/src/python/grpcio_tests/commands.py b/src/python/grpcio_tests/commands.py index 5ee551cfe1..e822971fe0 100644 --- a/src/python/grpcio_tests/commands.py +++ b/src/python/grpcio_tests/commands.py @@ -100,7 +100,7 @@ class BuildProtoModules(setuptools.Command): pass def run(self): - import grpc.tools.protoc as protoc + import grpc_tools.protoc as protoc include_regex = re.compile(self.include) exclude_regex = re.compile(self.exclude) if self.exclude else None @@ -116,7 +116,7 @@ class BuildProtoModules(setuptools.Command): # but we currently have name conflicts in src/proto for path in paths: command = [ - 'grpc.tools.protoc', + 'grpc_tools.protoc', '-I {}'.format(PROTO_STEM), '--python_out={}'.format(PROTO_STEM), '--grpc_python_out={}'.format(PROTO_STEM), diff --git a/src/python/grpcio_tests/setup.py b/src/python/grpcio_tests/setup.py index 01d5fa875b..375fbd6c77 100644 --- a/src/python/grpcio_tests/setup.py +++ b/src/python/grpcio_tests/setup.py @@ -35,7 +35,7 @@ import sys import setuptools -import grpc.tools.command +import grpc_tools.command PY3 = sys.version_info.major == 3 @@ -68,7 +68,7 @@ COMMAND_CLASS = { # Run `preprocess` *before* doing any packaging! 'preprocess': commands.GatherProto, - 'build_package_protos': grpc.tools.command.BuildPackageProtos, + 'build_package_protos': grpc_tools.command.BuildPackageProtos, 'build_py': commands.BuildPy, 'run_interop': commands.RunInterop, 'test_lite': commands.TestLite diff --git a/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py index 80300d13df..5dde72b169 100644 --- a/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py +++ b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py @@ -27,14 +27,14 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""Tests of grpc.health.v1.health.""" +"""Tests of grpc_health.v1.health.""" import unittest import grpc from grpc.framework.foundation import logging_pool -from grpc.health.v1 import health -from grpc.health.v1 import health_pb2 +from grpc_health.v1 import health +from grpc_health.v1 import health_pb2 from tests.unit.framework.common import test_constants diff --git a/src/python/grpcio_tests/tests/http2/_negative_http2_client.py b/src/python/grpcio_tests/tests/http2/_negative_http2_client.py new file mode 100644 index 0000000000..f8604683b3 --- /dev/null +++ b/src/python/grpcio_tests/tests/http2/_negative_http2_client.py @@ -0,0 +1,153 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""The Python client used to test negative http2 conditions.""" + +import argparse + +import grpc +from src.proto.grpc.testing import test_pb2 +from src.proto.grpc.testing import messages_pb2 + +def _validate_payload_type_and_length(response, expected_type, expected_length): + if response.payload.type is not expected_type: + raise ValueError( + 'expected payload type %s, got %s' % + (expected_type, type(response.payload.type))) + elif len(response.payload.body) != expected_length: + raise ValueError( + 'expected payload body size %d, got %d' % + (expected_length, len(response.payload.body))) + +def _expect_status_code(call, expected_code): + if call.code() != expected_code: + raise ValueError( + 'expected code %s, got %s' % (expected_code, call.code())) + +def _expect_status_details(call, expected_details): + if call.details() != expected_details: + raise ValueError( + 'expected message %s, got %s' % (expected_details, call.details())) + +def _validate_status_code_and_details(call, expected_code, expected_details): + _expect_status_code(call, expected_code) + _expect_status_details(call, expected_details) + +# common requests +_REQUEST_SIZE = 314159 +_RESPONSE_SIZE = 271828 + +_SIMPLE_REQUEST = messages_pb2.SimpleRequest( + response_type=messages_pb2.COMPRESSABLE, + response_size=_RESPONSE_SIZE, + payload=messages_pb2.Payload(body=b'\x00' * _REQUEST_SIZE)) + +def _goaway(stub): + first_response = stub.UnaryCall(_SIMPLE_REQUEST) + _validate_payload_type_and_length(first_response, + messages_pb2.COMPRESSABLE, _RESPONSE_SIZE) + second_response = stub.UnaryCall(_SIMPLE_REQUEST) + _validate_payload_type_and_length(second_response, + messages_pb2.COMPRESSABLE, _RESPONSE_SIZE) + +def _rst_after_header(stub): + resp_future = stub.UnaryCall.future(_SIMPLE_REQUEST) + _validate_status_code_and_details(resp_future, grpc.StatusCode.UNAVAILABLE, "") + +def _rst_during_data(stub): + resp_future = stub.UnaryCall.future(_SIMPLE_REQUEST) + _validate_status_code_and_details(resp_future, grpc.StatusCode.UNKNOWN, "") + +def _rst_after_data(stub): + resp_future = stub.UnaryCall.future(_SIMPLE_REQUEST) + _validate_payload_type_and_length(next(resp_future), + messages_pb2.COMPRESSABLE, _RESPONSE_SIZE) + _validate_status_code_and_details(resp_future, grpc.StatusCode.UNKNOWN, "") + +def _ping(stub): + response = 
stub.UnaryCall(_SIMPLE_REQUEST) + _validate_payload_type_and_length(response, + messages_pb2.COMPRESSABLE, _RESPONSE_SIZE) + +def _max_streams(stub): + # send one req to ensure server sets MAX_STREAMS + response = stub.UnaryCall(_SIMPLE_REQUEST) + _validate_payload_type_and_length(response, + messages_pb2.COMPRESSABLE, _RESPONSE_SIZE) + + # give the streams a workout + futures = [] + for _ in range(15): + futures.append(stub.UnaryCall.future(_SIMPLE_REQUEST)) + for future in futures: + _validate_payload_type_and_length(future.result(), + messages_pb2.COMPRESSABLE, _RESPONSE_SIZE) + +def _run_test_case(test_case, stub): + if test_case == 'goaway': + _goaway(stub) + elif test_case == 'rst_after_header': + _rst_after_header(stub) + elif test_case == 'rst_during_data': + _rst_during_data(stub) + elif test_case == 'rst_after_data': + _rst_after_data(stub) + elif test_case =='ping': + _ping(stub) + elif test_case == 'max_streams': + _max_streams(stub) + else: + raise ValueError("Invalid test case: %s" % test_case) + +def _args(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--server_host', help='the host to which to connect', type=str, + default="127.0.0.1") + parser.add_argument( + '--server_port', help='the port to which to connect', type=int, + default="8080") + parser.add_argument( + '--test_case', help='the test case to execute', type=str, + default="goaway") + return parser.parse_args() + +def _stub(server_host, server_port): + target = '{}:{}'.format(server_host, server_port) + channel = grpc.insecure_channel(target) + return test_pb2.TestServiceStub(channel) + +def main(): + args = _args() + stub = _stub(args.server_host, args.server_port) + _run_test_case(args.test_case, stub) + + +if __name__ == '__main__': + main() diff --git a/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py b/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py index 936c895bd2..4fb22b4d9d 100644 --- a/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py +++ b/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py @@ -35,13 +35,13 @@ import unittest import grpc from src.proto.grpc.testing import test_pb2 -from tests.interop import _interop_test_case +from tests.interop import _intraop_test_case from tests.interop import methods from tests.interop import server -class InsecureInteropTest( - _interop_test_case.InteropTestCase, +class InsecureIntraopTest( + _intraop_test_case.IntraopTestCase, unittest.TestCase): def setUp(self): diff --git a/src/python/grpcio_tests/tests/interop/_interop_test_case.py b/src/python/grpcio_tests/tests/interop/_intraop_test_case.py index ccea17a66d..fe1c173992 100644 --- a/src/python/grpcio_tests/tests/interop/_interop_test_case.py +++ b/src/python/grpcio_tests/tests/interop/_intraop_test_case.py @@ -32,7 +32,7 @@ from tests.interop import methods -class InteropTestCase(object): +class IntraopTestCase(object): """Unit test methods. 
This class must be mixed in with unittest.TestCase and a class that defines diff --git a/src/python/grpcio_tests/tests/interop/_secure_interop_test.py b/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py index eaca553e1b..3665c69726 100644 --- a/src/python/grpcio_tests/tests/interop/_secure_interop_test.py +++ b/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py @@ -35,15 +35,15 @@ import unittest import grpc from src.proto.grpc.testing import test_pb2 -from tests.interop import _interop_test_case +from tests.interop import _intraop_test_case from tests.interop import methods from tests.interop import resources _SERVER_HOST_OVERRIDE = 'foo.test.google.fr' -class SecureInteropTest( - _interop_test_case.InteropTestCase, +class SecureIntraopTest( + _intraop_test_case.IntraopTestCase, unittest.TestCase): def setUp(self): diff --git a/src/python/grpcio_tests/tests/interop/methods.py b/src/python/grpcio_tests/tests/interop/methods.py index 52e56f3502..9038ae5751 100644 --- a/src/python/grpcio_tests/tests/interop/methods.py +++ b/src/python/grpcio_tests/tests/interop/methods.py @@ -33,7 +33,6 @@ import enum import json import os import threading -import time from oauth2client import client as oauth2client_client @@ -196,16 +195,6 @@ def _server_streaming(stub): response, messages_pb2.COMPRESSABLE, sizes[index]) -def _cancel_after_begin(stub): - sizes = (27182, 8, 1828, 45904,) - payloads = (messages_pb2.Payload(body=b'\x00' * size) for size in sizes) - requests = (messages_pb2.StreamingInputCallRequest(payload=payload) - for payload in payloads) - response_future = stub.StreamingInputCall.future(requests) - response_future.cancel() - if not response_future.cancelled(): - raise ValueError('expected call to be cancelled') - class _Pipe(object): @@ -265,6 +254,16 @@ def _ping_pong(stub): response, messages_pb2.COMPRESSABLE, response_size) +def _cancel_after_begin(stub): + with _Pipe() as pipe: + response_future = stub.StreamingInputCall.future(pipe) + response_future.cancel() + if not response_future.cancelled(): + raise ValueError('expected cancelled method to return True') + if response_future.code() is not grpc.StatusCode.CANCELLED: + raise ValueError('expected status code CANCELLED') + + def _cancel_after_first_response(stub): request_response_sizes = (31415, 9, 2653, 58979,) request_payload_sizes = (27182, 8, 1828, 45904,) @@ -302,7 +301,6 @@ def _timeout_on_sleeping_server(stub): response_type=messages_pb2.COMPRESSABLE, payload=messages_pb2.Payload(body=b'\x00' * request_payload_size)) pipe.add(request) - time.sleep(0.1) try: next(response_iterator) except grpc.RpcError as rpc_error: diff --git a/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py b/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py index 64fd97256e..f8ae05bb7a 100644 --- a/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py +++ b/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py @@ -44,7 +44,7 @@ import threading import unittest import grpc -from grpc.tools import protoc +from grpc_tools import protoc from tests.unit.framework.common import test_constants _MESSAGES_IMPORT = b'import "messages.proto";' diff --git a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py index 87264cf9ba..c7bfeaeb95 100644 --- a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py +++ 
b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py @@ -27,14 +27,14 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""Tests of grpc.reflection.v1alpha.reflection.""" +"""Tests of grpc_reflection.v1alpha.reflection.""" import unittest import grpc from grpc.framework.foundation import logging_pool -from grpc.reflection.v1alpha import reflection -from grpc.reflection.v1alpha import reflection_pb2 +from grpc_reflection.v1alpha import reflection +from grpc_reflection.v1alpha import reflection_pb2 from google.protobuf import descriptor_pool from google.protobuf import descriptor_pb2 diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json index 04a2e44178..0109ee2173 100644 --- a/src/python/grpcio_tests/tests/tests.json +++ b/src/python/grpcio_tests/tests/tests.json @@ -1,7 +1,7 @@ [ "health_check._health_servicer_test.HealthServicerTest", - "interop._insecure_interop_test.InsecureInteropTest", - "interop._secure_interop_test.SecureInteropTest", + "interop._insecure_intraop_test.InsecureIntraopTest", + "interop._secure_intraop_test.SecureIntraopTest", "protoc_plugin._python_plugin_test.PythonPluginTest", "protoc_plugin._split_definitions_test.SameCommonTest", "protoc_plugin._split_definitions_test.SameSeparateTest", @@ -27,7 +27,7 @@ "unit._cython.cygrpc_test.TypeSmokeTest", "unit._empty_message_test.EmptyMessageTest", "unit._exit_test.ExitTest", - "unit._exit_test.ShutdownHandlerTest", + "unit._invalid_metadata_test.InvalidMetadataTest", "unit._metadata_code_details_test.MetadataCodeDetailsTest", "unit._metadata_test.MetadataTest", "unit._rpc_test.RPCTest", diff --git a/src/python/grpcio_tests/tests/unit/_api_test.py b/src/python/grpcio_tests/tests/unit/_api_test.py index 2fe89499f5..51dc425420 100644 --- a/src/python/grpcio_tests/tests/unit/_api_test.py +++ b/src/python/grpcio_tests/tests/unit/_api_test.py @@ -65,6 +65,7 @@ class AllTest(unittest.TestCase): 'RpcMethodHandler', 'HandlerCallDetails', 'GenericRpcHandler', + 'ServiceRpcHandler', 'Server', 'unary_unary_rpc_method_handler', 'unary_stream_rpc_method_handler', diff --git a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py index e0a7d15aa7..46a964db8c 100644 --- a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py +++ b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py @@ -64,7 +64,7 @@ class ChannelReadyFutureTest(unittest.TestCase): ready_future = grpc.channel_ready_future(channel) ready_future.add_done_callback(callback.accept_value) with self.assertRaises(grpc.FutureTimeoutError): - ready_future.result(test_constants.SHORT_TIMEOUT) + ready_future.result(timeout=test_constants.SHORT_TIMEOUT) self.assertFalse(ready_future.cancelled()) self.assertFalse(ready_future.done()) self.assertTrue(ready_future.running()) @@ -85,7 +85,7 @@ class ChannelReadyFutureTest(unittest.TestCase): ready_future = grpc.channel_ready_future(channel) ready_future.add_done_callback(callback.accept_value) - self.assertIsNone(ready_future.result(test_constants.SHORT_TIMEOUT)) + self.assertIsNone(ready_future.result(timeout=test_constants.LONG_TIMEOUT)) value_passed_to_callback = callback.block_until_called() self.assertIs(ready_future, value_passed_to_callback) self.assertFalse(ready_future.cancelled()) diff --git a/src/python/grpcio_tests/tests/unit/_exit_test.py 
b/src/python/grpcio_tests/tests/unit/_exit_test.py index 342f5fcc10..5a4a32887c 100644 --- a/src/python/grpcio_tests/tests/unit/_exit_test.py +++ b/src/python/grpcio_tests/tests/unit/_exit_test.py @@ -43,8 +43,6 @@ import threading import time import unittest -import grpc -from grpc.framework.foundation import logging_pool from tests.unit import _exit_scenarios SCENARIO_FILE = os.path.abspath(os.path.join( @@ -54,7 +52,7 @@ BASE_COMMAND = [INTERPRETER, SCENARIO_FILE] BASE_SIGTERM_COMMAND = BASE_COMMAND + ['--wait_for_interrupt'] INIT_TIME = 1.0 -SHUTDOWN_GRACE = 5.0 + processes = [] process_lock = threading.Lock() @@ -184,24 +182,5 @@ class ExitTest(unittest.TestCase): interrupt_and_wait(process) -class _ShutDownHandler(object): - - def __init__(self): - self.seen_handler_grace = None - - def shutdown_handler(self, handler_grace): - self.seen_handler_grace = handler_grace - - -class ShutdownHandlerTest(unittest.TestCase): - - def test_shutdown_handler(self): - server = grpc.server(logging_pool.pool(1)) - handler = _ShutDownHandler() - server.add_shutdown_handler(handler.shutdown_handler) - server.start() - server.stop(0, shutdown_handler_grace=SHUTDOWN_GRACE).wait() - self.assertEqual(SHUTDOWN_GRACE, handler.seen_handler_grace) - if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py b/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py new file mode 100644 index 0000000000..2dc225de29 --- /dev/null +++ b/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py @@ -0,0 +1,175 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
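The new test module that follows pins down how invalid metadata surfaces after this change: blocking invocations raise ValueError before anything is sent on the wire, while future and streaming invocations return a rendezvous that terminates with StatusCode.INTERNAL. A condensed sketch of the same behavior (no server needs to be running, since the call fails while starting the batch):

import grpc

channel = grpc.insecure_channel('localhost:8080')
multi_callable = channel.unary_unary('/test/UnaryUnary')
bad_metadata = (('InVaLiD', 'md'),)  # uppercase metadata keys are rejected

# Blocking form: rejected synchronously with ValueError.
try:
    multi_callable(b'\x07\x08', metadata=bad_metadata)
except ValueError as error:
    print('blocking call rejected:', error)

# Future form: the rendezvous finishes with StatusCode.INTERNAL instead.
response_future = multi_callable.future(b'\x07\x08', metadata=bad_metadata)
try:
    response_future.result()
except grpc.RpcError:
    assert response_future.code() is grpc.StatusCode.INTERNAL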
+ +"""Test of RPCs made against gRPC Python's application-layer API.""" + +import unittest + +import grpc + +from tests.unit.framework.common import test_constants + +_SERIALIZE_REQUEST = lambda bytestring: bytestring * 2 +_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:] +_SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3 +_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3] + +_UNARY_UNARY = '/test/UnaryUnary' +_UNARY_STREAM = '/test/UnaryStream' +_STREAM_UNARY = '/test/StreamUnary' +_STREAM_STREAM = '/test/StreamStream' + + +def _unary_unary_multi_callable(channel): + return channel.unary_unary(_UNARY_UNARY) + + +def _unary_stream_multi_callable(channel): + return channel.unary_stream( + _UNARY_STREAM, + request_serializer=_SERIALIZE_REQUEST, + response_deserializer=_DESERIALIZE_RESPONSE) + + +def _stream_unary_multi_callable(channel): + return channel.stream_unary( + _STREAM_UNARY, + request_serializer=_SERIALIZE_REQUEST, + response_deserializer=_DESERIALIZE_RESPONSE) + + +def _stream_stream_multi_callable(channel): + return channel.stream_stream(_STREAM_STREAM) + + +class InvalidMetadataTest(unittest.TestCase): + + def setUp(self): + self._channel = grpc.insecure_channel('localhost:8080') + self._unary_unary = _unary_unary_multi_callable(self._channel) + self._unary_stream = _unary_stream_multi_callable(self._channel) + self._stream_unary = _stream_unary_multi_callable(self._channel) + self._stream_stream = _stream_stream_multi_callable(self._channel) + + def testUnaryRequestBlockingUnaryResponse(self): + request = b'\x07\x08' + metadata = (('InVaLiD', 'UnaryRequestBlockingUnaryResponse'),) + expected_error_details = "metadata was invalid: %s" % metadata + with self.assertRaises(ValueError) as exception_context: + self._unary_unary(request, metadata=metadata) + self.assertIn(expected_error_details, str(exception_context.exception)) + + def testUnaryRequestBlockingUnaryResponseWithCall(self): + request = b'\x07\x08' + metadata = (('InVaLiD', 'UnaryRequestBlockingUnaryResponseWithCall'),) + expected_error_details = "metadata was invalid: %s" % metadata + with self.assertRaises(ValueError) as exception_context: + self._unary_unary.with_call(request, metadata=metadata) + self.assertIn(expected_error_details, str(exception_context.exception)) + + def testUnaryRequestFutureUnaryResponse(self): + request = b'\x07\x08' + metadata = (('InVaLiD', 'UnaryRequestFutureUnaryResponse'),) + expected_error_details = "metadata was invalid: %s" % metadata + response_future = self._unary_unary.future(request, metadata=metadata) + with self.assertRaises(grpc.RpcError) as exception_context: + response_future.result() + self.assertEqual( + exception_context.exception.details(), expected_error_details) + self.assertEqual( + exception_context.exception.code(), grpc.StatusCode.INTERNAL) + self.assertEqual(response_future.details(), expected_error_details) + self.assertEqual(response_future.code(), grpc.StatusCode.INTERNAL) + + def testUnaryRequestStreamResponse(self): + request = b'\x37\x58' + metadata = (('InVaLiD', 'UnaryRequestStreamResponse'),) + expected_error_details = "metadata was invalid: %s" % metadata + response_iterator = self._unary_stream(request, metadata=metadata) + with self.assertRaises(grpc.RpcError) as exception_context: + next(response_iterator) + self.assertEqual( + exception_context.exception.details(), expected_error_details) + self.assertEqual( + exception_context.exception.code(), grpc.StatusCode.INTERNAL) + 
self.assertEqual(response_iterator.details(), expected_error_details) + self.assertEqual(response_iterator.code(), grpc.StatusCode.INTERNAL) + + def testStreamRequestBlockingUnaryResponse(self): + request_iterator = (b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) + metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponse'),) + expected_error_details = "metadata was invalid: %s" % metadata + with self.assertRaises(ValueError) as exception_context: + self._stream_unary(request_iterator, metadata=metadata) + self.assertIn(expected_error_details, str(exception_context.exception)) + + def testStreamRequestBlockingUnaryResponseWithCall(self): + request_iterator = ( + b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) + metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponseWithCall'),) + expected_error_details = "metadata was invalid: %s" % metadata + multi_callable = _stream_unary_multi_callable(self._channel) + with self.assertRaises(ValueError) as exception_context: + multi_callable.with_call(request_iterator, metadata=metadata) + self.assertIn(expected_error_details, str(exception_context.exception)) + + def testStreamRequestFutureUnaryResponse(self): + request_iterator = ( + b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) + metadata = (('InVaLiD', 'StreamRequestFutureUnaryResponse'),) + expected_error_details = "metadata was invalid: %s" % metadata + response_future = self._stream_unary.future( + request_iterator, metadata=metadata) + with self.assertRaises(grpc.RpcError) as exception_context: + response_future.result() + self.assertEqual( + exception_context.exception.details(), expected_error_details) + self.assertEqual( + exception_context.exception.code(), grpc.StatusCode.INTERNAL) + self.assertEqual(response_future.details(), expected_error_details) + self.assertEqual(response_future.code(), grpc.StatusCode.INTERNAL) + + def testStreamRequestStreamResponse(self): + request_iterator = ( + b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) + metadata = (('InVaLiD', 'StreamRequestStreamResponse'),) + expected_error_details = "metadata was invalid: %s" % metadata + response_iterator = self._stream_stream(request_iterator, metadata=metadata) + with self.assertRaises(grpc.RpcError) as exception_context: + next(response_iterator) + self.assertEqual( + exception_context.exception.details(), expected_error_details) + self.assertEqual( + exception_context.exception.code(), grpc.StatusCode.INTERNAL) + self.assertEqual(response_iterator.details(), expected_error_details) + self.assertEqual(response_iterator.code(), grpc.StatusCode.INTERNAL) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py b/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py index 90fe10c77c..9cce96cc85 100644 --- a/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py +++ b/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py @@ -66,7 +66,7 @@ class ChannelConnectivityTest(unittest.TestCase): ready_future = utilities.channel_ready_future(channel) ready_future.add_done_callback(callback.accept_value) with self.assertRaises(future.TimeoutError): - ready_future.result(test_constants.SHORT_TIMEOUT) + ready_future.result(timeout=test_constants.SHORT_TIMEOUT) self.assertFalse(ready_future.cancelled()) self.assertFalse(ready_future.done()) self.assertTrue(ready_future.running()) @@ -88,7 +88,7 @@ class ChannelConnectivityTest(unittest.TestCase): ready_future = 
utilities.channel_ready_future(channel) ready_future.add_done_callback(callback.accept_value) self.assertIsNone( - ready_future.result(test_constants.SHORT_TIMEOUT)) + ready_future.result(timeout=test_constants.LONG_TIMEOUT)) value_passed_to_callback = callback.block_until_called() self.assertIs(ready_future, value_passed_to_callback) self.assertFalse(ready_future.cancelled()) diff --git a/src/ruby/ext/grpc/rb_byte_buffer.c b/src/ruby/ext/grpc/rb_byte_buffer.c index f97890e4a2..47fd6d9120 100644 --- a/src/ruby/ext/grpc/rb_byte_buffer.c +++ b/src/ruby/ext/grpc/rb_byte_buffer.c @@ -65,5 +65,6 @@ VALUE grpc_rb_byte_buffer_to_s(grpc_byte_buffer *buffer) { GRPC_SLICE_LENGTH(next)); grpc_slice_unref(next); } + grpc_byte_buffer_reader_destroy(&reader); return rb_string; } diff --git a/src/ruby/ext/grpc/rb_server.c b/src/ruby/ext/grpc/rb_server.c index 2a6a246e67..c7b112c94b 100644 --- a/src/ruby/ext/grpc/rb_server.c +++ b/src/ruby/ext/grpc/rb_server.c @@ -37,6 +37,7 @@ #include "rb_server.h" #include <grpc/grpc.h> +#include <grpc/support/atm.h> #include <grpc/grpc_security.h> #include <grpc/support/log.h> #include "rb_call.h" @@ -59,22 +60,26 @@ typedef struct grpc_rb_server { /* The actual server */ grpc_server *wrapped; grpc_completion_queue *queue; + gpr_atm shutdown_started; } grpc_rb_server; static void destroy_server(grpc_rb_server *server, gpr_timespec deadline) { grpc_event ev; - if (server->wrapped != NULL) { - grpc_server_shutdown_and_notify(server->wrapped, server->queue, NULL); - ev = rb_completion_queue_pluck(server->queue, NULL, deadline, NULL); - if (ev.type == GRPC_QUEUE_TIMEOUT) { - grpc_server_cancel_all_calls(server->wrapped); - rb_completion_queue_pluck(server->queue, NULL, - gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + // This can be started by app or implicitly by GC. Avoid a race between these. + if (gpr_atm_full_fetch_add(&server->shutdown_started, (gpr_atm)1) == 0) { + if (server->wrapped != NULL) { + grpc_server_shutdown_and_notify(server->wrapped, server->queue, NULL); + ev = rb_completion_queue_pluck(server->queue, NULL, deadline, NULL); + if (ev.type == GRPC_QUEUE_TIMEOUT) { + grpc_server_cancel_all_calls(server->wrapped); + rb_completion_queue_pluck(server->queue, NULL, + gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + } + grpc_server_destroy(server->wrapped); + grpc_rb_completion_queue_destroy(server->queue); + server->wrapped = NULL; + server->queue = NULL; } - grpc_server_destroy(server->wrapped); - grpc_rb_completion_queue_destroy(server->queue); - server->wrapped = NULL; - server->queue = NULL; } } @@ -115,6 +120,7 @@ static const rb_data_type_t grpc_rb_server_data_type = { static VALUE grpc_rb_server_alloc(VALUE cls) { grpc_rb_server *wrapper = ALLOC(grpc_rb_server); wrapper->wrapped = NULL; + wrapper->shutdown_started = (gpr_atm)0; return TypedData_Wrap_Struct(cls, &grpc_rb_server_data_type, wrapper); } diff --git a/src/ruby/lib/grpc/errors.rb b/src/ruby/lib/grpc/errors.rb index 23b2bb7e12..f6998e17c4 100644 --- a/src/ruby/lib/grpc/errors.rb +++ b/src/ruby/lib/grpc/errors.rb @@ -35,9 +35,18 @@ module GRPC # either end of a GRPC connection. When raised, it indicates that a status # error should be returned to the other end of a GRPC connection; when # caught it means that this end received a status error. + # + # There is also a subclass of BadStatus in this module for each GRPC status. + # E.g., the GRPC::Cancelled class corresponds to status CANCELLED.
+ # + # See + # https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/status.h + # for detailed descriptions of each status code. class BadStatus < StandardError attr_reader :code, :details, :metadata + include GRPC::Core::StatusCodes + # @param code [Numeric] the status code # @param details [String] the details of the exception # @param metadata [Hash] the error's metadata @@ -55,9 +64,152 @@ module GRPC def to_status Struct::Status.new(code, details, @metadata) end + + def self.new_status_exception(code, details = 'unknown cause', metadata = {}) + codes = {} + codes[OK] = Ok + codes[CANCELLED] = Cancelled + codes[UNKNOWN] = Unknown + codes[INVALID_ARGUMENT] = InvalidArgument + codes[DEADLINE_EXCEEDED] = DeadlineExceeded + codes[NOT_FOUND] = NotFound + codes[ALREADY_EXISTS] = AlreadyExists + codes[PERMISSION_DENIED] = PermissionDenied + codes[UNAUTHENTICATED] = Unauthenticated + codes[RESOURCE_EXHAUSTED] = ResourceExhausted + codes[FAILED_PRECONDITION] = FailedPrecondition + codes[ABORTED] = Aborted + codes[OUT_OF_RANGE] = OutOfRange + codes[UNIMPLEMENTED] = Unimplemented + codes[INTERNAL] = Internal + codes[UNIMPLEMENTED] = Unimplemented + codes[UNAVAILABLE] = Unavailable + codes[DATA_LOSS] = DataLoss + + if codes[code].nil? + BadStatus.new(code, details, metadata) + else + codes[code].new(details, metadata) + end + end + end + + # GRPC status code corresponding to status OK + class Ok < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::OK, details, metadata) + end end - # Cancelled is an exception class that indicates that an rpc was cancelled. - class Cancelled < StandardError + # GRPC status code corresponding to status CANCELLED + class Cancelled < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::CANCELLED, details, metadata) + end + end + + # GRPC status code corresponding to status UNKNOWN + class Unknown < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::UNKNOWN, details, metadata) + end + end + + # GRPC status code corresponding to status INVALID_ARGUMENT + class InvalidArgument < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::INVALID_ARGUMENT, details, metadata) + end + end + + # GRPC status code corresponding to status DEADLINE_EXCEEDED + class DeadlineExceeded < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::DEADLINE_EXCEEDED, details, metadata) + end + end + + # GRPC status code corresponding to status NOT_FOUND + class NotFound < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::NOT_FOUND, details, metadata) + end + end + + # GRPC status code corresponding to status ALREADY_EXISTS + class AlreadyExists < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::ALREADY_EXISTS, details, metadata) + end + end + + # GRPC status code corresponding to status PERMISSION_DENIED + class PermissionDenied < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::PERMISSION_DENIED, details, metadata) + end + end + + # GRPC status code corresponding to status UNAUTHENTICATED + class Unauthenticated < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::UNAUTHENTICATED, details, metadata) + end + end + + # GRPC status code corresponding to status RESOURCE_EXHAUSTED + class
ResourceExhausted < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::RESOURCE_EXHAUSTED, details, metadata) + end + end + + # GRPC status code corresponding to status FAILED_PRECONDITION + class FailedPrecondition < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::FAILED_PRECONDITION, details, metadata) + end + end + + # GRPC status code corresponding to status ABORTED + class Aborted < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::ABORTED, details, metadata) + end + end + + # GRPC status code corresponding to status OUT_OF_RANGE + class OutOfRange < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::OUT_OF_RANGE, details, metadata) + end + end + + # GRPC status code corresponding to status UNIMPLEMENTED + class Unimplemented < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::UNIMPLEMENTED, details, metadata) + end + end + + # GRPC status code corresponding to status INTERNAL + class Internal < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::INTERNAL, details, metadata) + end + end + + # GRPC status code corresponding to status UNAVAILABLE + class Unavailable < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::UNAVAILABLE, details, metadata) + end + end + + # GRPC status code corresponding to status DATA_LOSS + class DataLoss < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::DATA_LOSS, details, metadata) + end end end diff --git a/src/ruby/lib/grpc/generic/active_call.rb b/src/ruby/lib/grpc/generic/active_call.rb index f5c426ebfc..3b31f77ec0 100644 --- a/src/ruby/lib/grpc/generic/active_call.rb +++ b/src/ruby/lib/grpc/generic/active_call.rb @@ -43,8 +43,8 @@ class Struct GRPC.logger.debug("Failing with status #{status}") # raise BadStatus, propagating the metadata if present. md = status.metadata - fail GRPC::BadStatus.new(status.code, status.details, md), - "status code: #{status.code}, details: #{status.details}" + fail GRPC::BadStatus.new_status_exception( + status.code, status.details, md) end status end diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb index d7cd9e6df2..adc77bbf5d 100644 --- a/src/ruby/lib/grpc/generic/bidi_call.rb +++ b/src/ruby/lib/grpc/generic/bidi_call.rb @@ -200,6 +200,7 @@ module GRPC if is_client batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil) @call.status = batch_result.status + @call.trailing_metadata = @call.status.metadata if @call.status batch_result.check_status GRPC.logger.debug("bidi-read-loop: done status #{@call.status}") end @@ -219,6 +220,10 @@ module GRPC GRPC.logger.debug('bidi-read-loop: finished') @reads_complete = true finished + # Make sure that the write loop is done before finishing the call.
+ # Note that blocking is ok at this point because we've already received + # a status + @enq_th.join if is_client end end end diff --git a/src/ruby/lib/grpc/generic/rpc_desc.rb b/src/ruby/lib/grpc/generic/rpc_desc.rb index cd17aed8e7..d46c4a1b5c 100644 --- a/src/ruby/lib/grpc/generic/rpc_desc.rb +++ b/src/ruby/lib/grpc/generic/rpc_desc.rb @@ -119,7 +119,7 @@ module GRPC # Send back a UNKNOWN status to the client GRPC.logger.warn("failed handler: #{active_call}; sending status:UNKNOWN") GRPC.logger.warn(e) - send_status(active_call, UNKNOWN, 'unkown error handling call on server') + send_status(active_call, UNKNOWN, "#{e.class}: #{e.message}") end def assert_arity_matches(mth) diff --git a/src/ruby/lib/grpc/generic/service.rb b/src/ruby/lib/grpc/generic/service.rb index 7cb9f1cc99..f5a6b49eb4 100644 --- a/src/ruby/lib/grpc/generic/service.rb +++ b/src/ruby/lib/grpc/generic/service.rb @@ -110,8 +110,9 @@ module GRPC rpc_descs[name] = RpcDesc.new(name, input, output, marshal_class_method, unmarshal_class_method) - define_method(name) do - fail GRPC::BadStatus, GRPC::Core::StatusCodes::UNIMPLEMENTED + define_method(GenericService.underscore(name.to_s).to_sym) do |_, _| + fail GRPC::BadStatus.new_status_exception( + GRPC::Core::StatusCodes::UNIMPLEMENTED) end end diff --git a/src/ruby/pb/grpc/health/checker.rb b/src/ruby/pb/grpc/health/checker.rb index 4bce1744c4..6b2d852ebf 100644 --- a/src/ruby/pb/grpc/health/checker.rb +++ b/src/ruby/pb/grpc/health/checker.rb @@ -52,7 +52,9 @@ module Grpc @status_mutex.synchronize do status = @statuses["#{req.service}"] end - fail GRPC::BadStatus, StatusCodes::NOT_FOUND if status.nil? + if status.nil? + fail GRPC::BadStatus.new_status_exception(StatusCodes::NOT_FOUND) + end HealthCheckResponse.new(status: status) end diff --git a/src/ruby/pb/test/client.rb b/src/ruby/pb/test/client.rb index 1e3ae65630..f101f9d89e 100755 --- a/src/ruby/pb/test/client.rb +++ b/src/ruby/pb/test/client.rb @@ -459,11 +459,8 @@ class NamedTests deadline = GRPC::Core::TimeConsts::from_relative_time(1) resps = @stub.full_duplex_call(enum.each_item, deadline: deadline) resps.each { } # wait to receive each request (or timeout) - fail 'Should have raised GRPC::BadStatus(DEADLINE_EXCEEDED)' - rescue GRPC::BadStatus => e - assert("#{__callee__}: status was wrong") do - e.code == GRPC::Core::StatusCodes::DEADLINE_EXCEEDED - end + fail 'Should have raised GRPC::DeadlineExceeded' + rescue GRPC::DeadlineExceeded end def empty_stream diff --git a/src/ruby/qps/client.rb b/src/ruby/qps/client.rb index 8aed866da5..817192626b 100644 --- a/src/ruby/qps/client.rb +++ b/src/ruby/qps/client.rb @@ -134,6 +134,7 @@ class BenchmarkClient resp = stub.streaming_call(q.each_item) start = Time.now q.push(req) + pushed_sentinal = false resp.each do |r| @histogram.add((Time.now-start)*1e9) if !@done @@ -141,8 +142,9 @@ class BenchmarkClient start = Time.now q.push(req) else - q.push(self) - break + q.push(self) unless pushed_sentinal + # Continue polling on the responses to consume and release resources + pushed_sentinal = true end end end diff --git a/tools/run_tests/prepare_travis.sh b/src/ruby/spec/error_sanity_spec.rb index 10546535e8..77e94a8816 100755..100644 --- a/tools/run_tests/prepare_travis.sh +++ b/src/ruby/spec/error_sanity_spec.rb @@ -1,4 +1,3 @@ -#!/bin/bash # Copyright 2015, Google Inc. # All rights reserved. # @@ -28,40 +27,38 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-cd `dirname $0`/../.. -grpc_dir=`pwd` +require 'grpc' -distrib=`md5sum /etc/issue | cut -f1 -d\ ` -echo "Configuring for distribution $distrib" -git submodule | while read sha path extra ; do - cd /tmp - name=`basename $path` - file=$name-$sha-$CONFIG-prebuilt-$distrib.tar.gz - echo -n "Looking for $file ..." - url=http://storage.googleapis.com/grpc-prebuilt-packages/$file - wget -q $url && ( - echo " Found." - tar xfz $file - ) || echo " Not found." -done +StatusCodes = GRPC::Core::StatusCodes -mkdir -p bins/$CONFIG/protobuf -mkdir -p libs/$CONFIG/protobuf -mkdir -p libs/$CONFIG/openssl +describe StatusCodes do + # convert upper snake-case to camel case. + # e.g., DEADLINE_EXCEEDED -> DeadlineExceeded + def upper_snake_to_camel(name) + name.to_s.split('_').map(&:downcase).map(&:capitalize).join('') + end -function cpt { - cp /tmp/prebuilt/$1 $2/$CONFIG/$3 - touch $2/$CONFIG/$3/`basename $1` -} + StatusCodes.constants.each do |status_name| + it 'there is a subclass of BadStatus corresponding to StatusCode: ' \ + "#{status_name} that has code: #{StatusCodes.const_get(status_name)}" do + camel_case = upper_snake_to_camel(status_name) + error_class = GRPC.const_get(camel_case) + # expect the error class to be a subclass of BadStatus + expect(error_class < GRPC::BadStatus) -if [ -e /tmp/prebuilt/bin/protoc ] ; then - touch third_party/protobuf/configure - cpt bin/protoc bins protobuf - cpt lib/libprotoc.a libs protobuf - cpt lib/libprotobuf.a libs protobuf -fi + error_object = error_class.new + # check that the code matches the int value of the error's constant + status_code = StatusCodes.const_get(status_name) + expect(error_object.code).to eq(status_code) -if [ -e /tmp/prebuilt/lib/libssl.a ] ; then - cpt lib/libcrypto.a libs openssl - cpt lib/libssl.a libs openssl -fi + # check default parameters + expect(error_object.details).to eq('unknown cause') + expect(error_object.metadata).to eq({}) + + # check that the BadStatus factory for creates the correct + # exception too + from_factory = GRPC::BadStatus.new_status_exception(status_code) + expect(from_factory.is_a?(error_class)).to be(true) + end + end +end diff --git a/src/ruby/spec/generic/client_stub_spec.rb b/src/ruby/spec/generic/client_stub_spec.rb index 607a4a3c5d..b51b291cbd 100644 --- a/src/ruby/spec/generic/client_stub_spec.rb +++ b/src/ruby/spec/generic/client_stub_spec.rb @@ -190,15 +190,14 @@ describe 'ClientStub' do end creds = GRPC::Core::CallCredentials.new(failing_auth) - error_occured = false + unauth_error_occured = false begin get_response(stub, credentials: creds) - rescue GRPC::BadStatus => e - error_occured = true - expect(e.code).to eq(GRPC::Core::StatusCodes::UNAUTHENTICATED) + rescue GRPC::Unauthenticated => e + unauth_error_occured = true expect(e.details.include?(error_message)).to be true end - expect(error_occured).to eq(true) + expect(unauth_error_occured).to eq(true) # Kill the server thread so tests can complete th.kill diff --git a/src/ruby/spec/generic/rpc_desc_spec.rb b/src/ruby/spec/generic/rpc_desc_spec.rb index a3f0efa603..1ace7211e9 100644 --- a/src/ruby/spec/generic/rpc_desc_spec.rb +++ b/src/ruby/spec/generic/rpc_desc_spec.rb @@ -48,7 +48,6 @@ describe GRPC::RpcDesc do @bidi_streamer = RpcDesc.new('ss', Stream.new(Object.new), Stream.new(Object.new), 'encode', 'decode') @bs_code = INTERNAL - @no_reason = 'unkown error handling call on server' @ok_response = Object.new end @@ -62,8 +61,9 @@ describe GRPC::RpcDesc do it 'sends status UNKNOWN if other StandardErrors are raised' do expect(@call).to 
receive(:remote_read).once.and_return(Object.new) - expect(@call).to receive(:send_status) .once.with(UNKNOWN, @no_reason, - false, metadata: {}) + expect(@call).to receive(:send_status).once.with(UNKNOWN, + arg_error_msg, + false, metadata: {}) this_desc.run_server_method(@call, method(:other_error)) end @@ -112,7 +112,7 @@ describe GRPC::RpcDesc do end it 'sends status UNKNOWN if other StandardErrors are raised' do - expect(@call).to receive(:send_status).once.with(UNKNOWN, @no_reason, + expect(@call).to receive(:send_status).once.with(UNKNOWN, arg_error_msg, false, metadata: {}) @client_streamer.run_server_method(@call, method(:other_error_alt)) end @@ -174,8 +174,9 @@ describe GRPC::RpcDesc do end it 'sends status UNKNOWN if other StandardErrors are raised' do + error_msg = arg_error_msg(StandardError.new) expect(@call).to receive(:run_server_bidi).and_raise(StandardError) - expect(@call).to receive(:send_status).once.with(UNKNOWN, @no_reason, + expect(@call).to receive(:send_status).once.with(UNKNOWN, error_msg, false, metadata: {}) @bidi_streamer.run_server_method(@call, method(:other_error_alt)) end @@ -342,4 +343,9 @@ describe GRPC::RpcDesc do def other_error_alt(_call) fail(ArgumentError, 'other error') end + + def arg_error_msg(error = nil) + error ||= ArgumentError.new('other error') + "#{error.class}: #{error.message}" + end end diff --git a/src/ruby/spec/generic/rpc_server_pool_spec.rb b/src/ruby/spec/generic/rpc_server_pool_spec.rb index 48ccaee510..69e8222cb9 100644 --- a/src/ruby/spec/generic/rpc_server_pool_spec.rb +++ b/src/ruby/spec/generic/rpc_server_pool_spec.rb @@ -94,18 +94,6 @@ describe GRPC::Pool do expect(q.pop).to be(o) p.stop end - - it 'it throws an error if all of the workers have tasks to do' do - p = Pool.new(5) - p.start - job = proc {} - 5.times do - expect(p.ready_for_work?).to be(true) - p.schedule(&job) - end - expect { p.schedule(&job) }.to raise_error - expect { p.schedule(&job) }.to raise_error - end end describe '#stop' do diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb index c5694790fd..806ea8ce9f 100644 --- a/src/ruby/spec/generic/rpc_server_spec.rb +++ b/src/ruby/spec/generic/rpc_server_spec.rb @@ -408,21 +408,21 @@ describe GRPC::RpcServer do req = EchoMsg.new n = 20 # arbitrary, use as many to ensure the server pool is exceeded threads = [] - bad_status_code = nil + one_failed_as_unavailable = false n.times do threads << Thread.new do stub = SlowStub.new(alt_host, :this_channel_is_insecure) begin stub.an_rpc(req) - rescue GRPC::BadStatus => e - bad_status_code = e.code + rescue GRPC::ResourceExhausted + one_failed_as_unavailable = true end end end threads.each(&:join) alt_srv.stop t.join - expect(bad_status_code).to be(StatusCodes::RESOURCE_EXHAUSTED) + expect(one_failed_as_unavailable).to be(true) end end diff --git a/src/ruby/spec/pb/health/checker_spec.rb b/src/ruby/spec/pb/health/checker_spec.rb index 4711e09e88..719510001c 100644 --- a/src/ruby/spec/pb/health/checker_spec.rb +++ b/src/ruby/spec/pb/health/checker_spec.rb @@ -122,7 +122,7 @@ describe Grpc::Health::Checker do checker.check(HCReq.new(service: t[:service]), nil) end expected_msg = /#{StatusCodes::NOT_FOUND}/ - expect(&blk).to raise_error GRPC::BadStatus, expected_msg + expect(&blk).to raise_error GRPC::NotFound, expected_msg end end end @@ -141,7 +141,7 @@ describe Grpc::Health::Checker do checker.check(HCReq.new(service: t[:service]), nil) end expected_msg = /#{StatusCodes::NOT_FOUND}/ - expect(&blk).to raise_error 
GRPC::BadStatus, expected_msg + expect(&blk).to raise_error GRPC::NotFound, expected_msg end end end @@ -163,7 +163,7 @@ describe Grpc::Health::Checker do checker.check(HCReq.new(service: t[:service]), nil) end expected_msg = /#{StatusCodes::NOT_FOUND}/ - expect(&blk).to raise_error GRPC::BadStatus, expected_msg + expect(&blk).to raise_error GRPC::NotFound, expected_msg end end end @@ -214,7 +214,7 @@ describe Grpc::Health::Checker do stub.check(HCReq.new(service: 'unknown')) end expected_msg = /#{StatusCodes::NOT_FOUND}/ - expect(&blk).to raise_error GRPC::BadStatus, expected_msg + expect(&blk).to raise_error GRPC::NotFound, expected_msg @srv.stop t.join end diff --git a/src/ruby/spec/spec_helper.rb b/src/ruby/spec/spec_helper.rb index c891c1bf5e..c2be0afa72 100644 --- a/src/ruby/spec/spec_helper.rb +++ b/src/ruby/spec/spec_helper.rb @@ -67,3 +67,5 @@ RSpec.configure do |config| end RSpec::Expectations.configuration.warn_about_potential_false_positives = false + +Thread.abort_on_exception = true diff --git a/templates/gRPC-Core.podspec.template b/templates/gRPC-Core.podspec.template index fbad1a3f70..1b97d18f16 100644 --- a/templates/gRPC-Core.podspec.template +++ b/templates/gRPC-Core.podspec.template @@ -62,7 +62,7 @@ %> Pod::Spec.new do |s| s.name = 'gRPC-Core' - version = '1.0.1' + version = '1.0.2' s.version = version s.summary = 'Core cross-platform gRPC library, written in C' s.homepage = 'http://www.grpc.io' @@ -71,7 +71,9 @@ s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "v#{version}", + # TODO(mxyan): Change back to "v#{version}" for next release + #:tag => "v#{version}", + :tag => "objective-c-v#{version}", # TODO(jcanizales): Depend explicitly on the nanopb pod, and disable submodules. :submodules => true, } diff --git a/templates/grpc.gemspec.template b/templates/grpc.gemspec.template index 62d61b75c1..82fbb69008 100644 --- a/templates/grpc.gemspec.template +++ b/templates/grpc.gemspec.template @@ -29,7 +29,7 @@ s.require_paths = %w( src/ruby/bin src/ruby/lib src/ruby/pb ) s.platform = Gem::Platform::RUBY - s.add_dependency 'google-protobuf', '~> 3.0.2' + s.add_dependency 'google-protobuf', '~> 3.1.0' s.add_dependency 'googleauth', '~> 0.5.1' s.add_development_dependency 'bundler', '~> 1.9' diff --git a/templates/tools/dockerfile/clang_format.include b/templates/tools/dockerfile/clang_format.include new file mode 100644 index 0000000000..9a2b60ba8c --- /dev/null +++ b/templates/tools/dockerfile/clang_format.include @@ -0,0 +1,5 @@ +RUN apt-get update && apt-get -y install wget +RUN echo deb http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list +RUN echo deb-src http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list +RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key| apt-key add - +RUN apt-get update && apt-get -y install clang-format-3.8 diff --git a/templates/tools/dockerfile/grpc_clang_format/Dockerfile.template b/templates/tools/dockerfile/grpc_clang_format/Dockerfile.template new file mode 100644 index 0000000000..8360fc121c --- /dev/null +++ b/templates/tools/dockerfile/grpc_clang_format/Dockerfile.template @@ -0,0 +1,37 @@ +%YAML 1.2 +--- | + # Copyright 2015, Google Inc. + # All rights reserved. + # + # Redistribution and use in source and binary forms, with or without + # modification, are permitted provided that the following conditions are + # met: + # + # * Redistributions of source code must retain the above copyright + # notice, this list of conditions and the following disclaimer. 
+ # * Redistributions in binary form must reproduce the above + # copyright notice, this list of conditions and the following disclaimer + # in the documentation and/or other materials provided with the + # distribution. + # * Neither the name of Google Inc. nor the names of its + # contributors may be used to endorse or promote products derived from + # this software without specific prior written permission. + # + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + FROM ubuntu:15.10 + + <%include file="../clang_format.include"/> + ADD clang_format_all_the_things.sh / + CMD ["echo 'Run with tools/distrib/clang_format_code.sh'"] + diff --git a/templates/tools/dockerfile/test/sanity/Dockerfile.template b/templates/tools/dockerfile/test/sanity/Dockerfile.template index 0168353933..8617666b21 100644 --- a/templates/tools/dockerfile/test/sanity/Dockerfile.template +++ b/templates/tools/dockerfile/test/sanity/Dockerfile.template @@ -52,18 +52,12 @@ # ./compile.sh without a local protoc dependency # TODO(mattkwong): install dependencies to support latest Bazel version if newer # version is needed - RUN git clone https://github.com/bazelbuild/bazel.git /bazel && \ + RUN git clone https://github.com/bazelbuild/bazel.git /bazel && ${"\\"} cd /bazel && git checkout tags/0.4.1 && ./compile.sh RUN ln -s /bazel/output/bazel /bin/ - #=================== - # Docker "inception" - # Note this is quite the ugly hack. - # This makes sure that the docker binary we inject has its dependencies. - RUN curl https://get.docker.com/ | sh - RUN apt-get remove --purge -y docker-engine - - RUN mkdir /var/local/jenkins + <%include file="../../clang_format.include"/> + <%include file="../../run_tests_addons.include"/> # Define the default command. 
CMD ["bash"] diff --git a/templates/tools/run_tests/configs.json.template b/templates/tools/run_tests/generated/configs.json.template index 5c82dfb347..5c82dfb347 100644 --- a/templates/tools/run_tests/configs.json.template +++ b/templates/tools/run_tests/generated/configs.json.template diff --git a/templates/tools/run_tests/sources_and_headers.json.template b/templates/tools/run_tests/generated/sources_and_headers.json.template index 1c5c9747d6..1c5c9747d6 100644 --- a/templates/tools/run_tests/sources_and_headers.json.template +++ b/templates/tools/run_tests/generated/sources_and_headers.json.template diff --git a/templates/tools/run_tests/tests.json.template b/templates/tools/run_tests/generated/tests.json.template index 1e21465dd2..1e21465dd2 100644 --- a/templates/tools/run_tests/tests.json.template +++ b/templates/tools/run_tests/generated/tests.json.template diff --git a/templates/vsprojects/protobuf.props.template b/templates/vsprojects/protobuf.props.template index 48f9431c1c..3ae7c745de 100644 --- a/templates/vsprojects/protobuf.props.template +++ b/templates/vsprojects/protobuf.props.template @@ -6,7 +6,7 @@ <ItemDefinitionGroup>
<Link>
<AdditionalDependencies>libprotobuf.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\build\solution\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemGroup />
diff --git a/templates/vsprojects/protoc.props.template b/templates/vsprojects/protoc.props.template index ced6028a4b..2c3844a9a4 100644 --- a/templates/vsprojects/protoc.props.template +++ b/templates/vsprojects/protoc.props.template @@ -9,7 +9,7 @@ </ClCompile>
<Link>
<AdditionalDependencies>libprotoc.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\build\solution\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemGroup />
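The C-core test changes below all apply one recurring migration: grpc_closure_init() and grpc_closure_create() now take an explicit scheduler argument, and ad-hoc grpc_exec_ctx_sched() calls become grpc_closure_sched(). A minimal sketch of the new pattern, using only the signatures visible in the hunks that follow; the callback name and done flag are illustrative, and the headers are assumed to follow the src/core/lib/iomgr layout used elsewhere in this diff.

#include <stdbool.h>
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"

/* closure callbacks keep the (exec_ctx, arg, error) signature */
static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  *(bool *)arg = true;
}

static void schedule_example(grpc_exec_ctx *exec_ctx, bool *done) {
  /* grpc_closure_create() gains a scheduler argument; the tests below pass
     grpc_schedule_on_exec_ctx to keep the previous run-on-the-exec-ctx
     behavior.  grpc_exec_ctx_sched(exec_ctx, c, GRPC_ERROR_NONE, NULL)
     becomes grpc_closure_sched(exec_ctx, c, GRPC_ERROR_NONE). */
  grpc_closure_sched(
      exec_ctx, grpc_closure_create(on_done, done, grpc_schedule_on_exec_ctx),
      GRPC_ERROR_NONE);
}

The in-place form changes the same way: grpc_closure_init(&c, cb, arg) becomes grpc_closure_init(&c, cb, arg, grpc_schedule_on_exec_ctx), as the bad_client.c hunk below shows.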
diff --git a/test/core/bad_client/bad_client.c b/test/core/bad_client/bad_client.c index 07fcd995d7..d579dcc7d4 100644 --- a/test/core/bad_client/bad_client.c +++ b/test/core/bad_client/bad_client.c @@ -148,7 +148,8 @@ void grpc_run_bad_client_test( grpc_slice_buffer_init(&outgoing); grpc_slice_buffer_add(&outgoing, slice); - grpc_closure_init(&done_write_closure, done_write, &a); + grpc_closure_init(&done_write_closure, done_write, &a, + grpc_schedule_on_exec_ctx); /* Write data */ grpc_endpoint_write(&exec_ctx, sfd.client, &outgoing, &done_write_closure); @@ -175,7 +176,8 @@ void grpc_run_bad_client_test( grpc_slice_buffer_init(&args.incoming); gpr_event_init(&args.read_done); grpc_closure read_done_closure; - grpc_closure_init(&read_done_closure, read_done, &args); + grpc_closure_init(&read_done_closure, read_done, &args, + grpc_schedule_on_exec_ctx); grpc_endpoint_read(&exec_ctx, sfd.client, &args.incoming, &read_done_closure); grpc_exec_ctx_finish(&exec_ctx); diff --git a/test/core/channel/channel_stack_test.c b/test/core/channel/channel_stack_test.c index 0840820cca..b43d05eec3 100644 --- a/test/core/channel/channel_stack_test.c +++ b/test/core/channel/channel_stack_test.c @@ -41,9 +41,9 @@ #include "test/core/util/test_config.h" -static void channel_init_func(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *channel_init_func(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { GPR_ASSERT(args->channel_args->num_args == 1); GPR_ASSERT(args->channel_args->args[0].type == GRPC_ARG_INTEGER); GPR_ASSERT(0 == strcmp(args->channel_args->args[0].key, "test_key")); @@ -51,6 +51,7 @@ static void channel_init_func(grpc_exec_ctx *exec_ctx, GPR_ASSERT(args->is_first); GPR_ASSERT(args->is_last); *(int *)(elem->channel_data) = 0; + return GRPC_ERROR_NONE; } static grpc_error *call_init_func(grpc_exec_ctx *exec_ctx, diff --git a/test/core/client_channel/lb_policies_test.c b/test/core/client_channel/lb_policies_test.c index 6e4058fc21..016610763c 100644 --- a/test/core/client_channel/lb_policies_test.c +++ b/test/core/client_channel/lb_policies_test.c @@ -241,6 +241,8 @@ static request_sequences request_sequences_create(size_t n) { res.n = n; res.connections = gpr_malloc(sizeof(*res.connections) * n); res.connectivity_states = gpr_malloc(sizeof(*res.connectivity_states) * n); + memset(res.connections, 0, sizeof(*res.connections) * n); + memset(res.connectivity_states, 0, sizeof(*res.connectivity_states) * n); return res; } @@ -782,17 +784,15 @@ static void verify_total_carnage_round_robin(const servers_fixture *f, } } - /* no server is ever available. The persistent state is TRANSIENT_FAILURE. May - * also be CONNECTING if, under load, this check took too long to run and some - * subchannel already transitioned to retrying. */ + /* No server is ever available. There should be no READY states (or SHUTDOWN). + * Note that all other states (IDLE, CONNECTING, TRANSIENT_FAILURE) are still + * possible, as the policy transitions while attempting to reconnect. 
*/ for (size_t i = 0; i < sequences->n; i++) { const grpc_connectivity_state actual = sequences->connectivity_states[i]; - if (actual != GRPC_CHANNEL_TRANSIENT_FAILURE && - actual != GRPC_CHANNEL_CONNECTING) { + if (actual == GRPC_CHANNEL_READY || actual == GRPC_CHANNEL_SHUTDOWN) { gpr_log(GPR_ERROR, - "CONNECTIVITY STATUS SEQUENCE FAILURE: expected " - "GRPC_CHANNEL_TRANSIENT_FAILURE or GRPC_CHANNEL_CONNECTING, got " - "'%s' at iteration #%d", + "CONNECTIVITY STATUS SEQUENCE FAILURE: got unexpected state " + "'%s' at iteration #%d.", grpc_connectivity_state_name(actual), (int)i); abort(); } @@ -841,17 +841,15 @@ static void verify_partial_carnage_round_robin( abort(); } - /* ... and that the last one should be TRANSIENT_FAILURE, after all servers - * are gone. May also be CONNECTING if, under load, this check took too long - * to run and the subchannel already transitioned to retrying. */ + /* ... and that the last one shouldn't be READY (or SHUTDOWN): all servers are + * gone. It may be all other states (IDLE, CONNECTING, TRANSIENT_FAILURE), as + * the policy transitions while attempting to reconnect. */ actual = sequences->connectivity_states[num_iters - 1]; for (i = 0; i < sequences->n; i++) { - if (actual != GRPC_CHANNEL_TRANSIENT_FAILURE && - actual != GRPC_CHANNEL_CONNECTING) { + if (actual == GRPC_CHANNEL_READY || actual == GRPC_CHANNEL_SHUTDOWN) { gpr_log(GPR_ERROR, - "CONNECTIVITY STATUS SEQUENCE FAILURE: expected " - "GRPC_CHANNEL_TRANSIENT_FAILURE or GRPC_CHANNEL_CONNECTING, got " - "'%s' at iteration #%d", + "CONNECTIVITY STATUS SEQUENCE FAILURE: got unexpected state " + "'%s' at iteration #%d.", grpc_connectivity_state_name(actual), (int)i); abort(); } @@ -948,8 +946,8 @@ int main(int argc, char **argv) { const size_t NUM_ITERS = 10; const size_t NUM_SERVERS = 4; - grpc_test_init(argc, argv); grpc_init(); + grpc_test_init(argc, argv); grpc_tracer_set_enabled("round_robin", 1); GPR_ASSERT(grpc_lb_policy_create(&exec_ctx, "this-lb-policy-does-not-exist", diff --git a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c index ffa167a0e7..169323e0f7 100644 --- a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c +++ b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c @@ -63,7 +63,8 @@ static grpc_error *my_resolve_address(const char *name, const char *addr, } } -static grpc_resolver *create_resolver(const char *name) { +static grpc_resolver *create_resolver(grpc_exec_ctx *exec_ctx, + const char *name) { grpc_resolver_factory *factory = grpc_resolver_factory_lookup("dns"); grpc_uri *uri = grpc_uri_parse(name, 0); GPR_ASSERT(uri); @@ -71,7 +72,7 @@ static grpc_resolver *create_resolver(const char *name) { memset(&args, 0, sizeof(args)); args.uri = uri; grpc_resolver *resolver = - grpc_resolver_factory_create_resolver(factory, &args); + grpc_resolver_factory_create_resolver(exec_ctx, factory, &args); grpc_resolver_factory_unref(factory); grpc_uri_destroy(uri); return resolver; @@ -101,24 +102,24 @@ int main(int argc, char **argv) { grpc_init(); gpr_mu_init(&g_mu); grpc_blocking_resolve_address = my_resolve_address; - - grpc_resolver *resolver = create_resolver("dns:test"); - grpc_channel_args *result = (grpc_channel_args *)1; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_resolver *resolver = create_resolver(&exec_ctx, "dns:test"); gpr_event ev1; gpr_event_init(&ev1); - grpc_resolver_next(&exec_ctx, resolver, &result, - grpc_closure_create(on_done, &ev1)); + 
grpc_resolver_next( + &exec_ctx, resolver, &result, + grpc_closure_create(on_done, &ev1, grpc_schedule_on_exec_ctx)); grpc_exec_ctx_flush(&exec_ctx); GPR_ASSERT(wait_loop(5, &ev1)); GPR_ASSERT(result == NULL); gpr_event ev2; gpr_event_init(&ev2); - grpc_resolver_next(&exec_ctx, resolver, &result, - grpc_closure_create(on_done, &ev2)); + grpc_resolver_next( + &exec_ctx, resolver, &result, + grpc_closure_create(on_done, &ev2, grpc_schedule_on_exec_ctx)); grpc_exec_ctx_flush(&exec_ctx); GPR_ASSERT(wait_loop(30, &ev2)); GPR_ASSERT(result != NULL); diff --git a/test/core/client_channel/resolvers/dns_resolver_test.c b/test/core/client_channel/resolvers/dns_resolver_test.c index 41a9125431..5603a57b5f 100644 --- a/test/core/client_channel/resolvers/dns_resolver_test.c +++ b/test/core/client_channel/resolvers/dns_resolver_test.c @@ -48,7 +48,7 @@ static void test_succeeds(grpc_resolver_factory *factory, const char *string) { GPR_ASSERT(uri); memset(&args, 0, sizeof(args)); args.uri = uri; - resolver = grpc_resolver_factory_create_resolver(factory, &args); + resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args); GPR_ASSERT(resolver != NULL); GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "test_succeeds"); grpc_uri_destroy(uri); @@ -65,7 +65,7 @@ static void test_fails(grpc_resolver_factory *factory, const char *string) { GPR_ASSERT(uri); memset(&args, 0, sizeof(args)); args.uri = uri; - resolver = grpc_resolver_factory_create_resolver(factory, &args); + resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args); GPR_ASSERT(resolver == NULL); grpc_uri_destroy(uri); grpc_exec_ctx_finish(&exec_ctx); diff --git a/test/core/client_channel/resolvers/sockaddr_resolver_test.c b/test/core/client_channel/resolvers/sockaddr_resolver_test.c index ebf311ab83..d6c8920ad0 100644 --- a/test/core/client_channel/resolvers/sockaddr_resolver_test.c +++ b/test/core/client_channel/resolvers/sockaddr_resolver_test.c @@ -49,11 +49,6 @@ typedef struct on_resolution_arg { void on_resolution_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { on_resolution_arg *res = arg; - const grpc_arg *channel_arg = - grpc_channel_args_find(res->resolver_result, GRPC_ARG_SERVER_NAME); - GPR_ASSERT(channel_arg != NULL); - GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING); - GPR_ASSERT(strcmp(res->expected_server_name, channel_arg->value.string) == 0); grpc_channel_args_destroy(res->resolver_result); } @@ -67,14 +62,14 @@ static void test_succeeds(grpc_resolver_factory *factory, const char *string) { GPR_ASSERT(uri); memset(&args, 0, sizeof(args)); args.uri = uri; - resolver = grpc_resolver_factory_create_resolver(factory, &args); + resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args); GPR_ASSERT(resolver != NULL); on_resolution_arg on_res_arg; memset(&on_res_arg, 0, sizeof(on_res_arg)); on_res_arg.expected_server_name = uri->path; - grpc_closure *on_resolution = - grpc_closure_create(on_resolution_cb, &on_res_arg); + grpc_closure *on_resolution = grpc_closure_create( + on_resolution_cb, &on_res_arg, grpc_schedule_on_exec_ctx); grpc_resolver_next(&exec_ctx, resolver, &on_res_arg.resolver_result, on_resolution); @@ -93,7 +88,7 @@ static void test_fails(grpc_resolver_factory *factory, const char *string) { GPR_ASSERT(uri); memset(&args, 0, sizeof(args)); args.uri = uri; - resolver = grpc_resolver_factory_create_resolver(factory, &args); + resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args); GPR_ASSERT(resolver == NULL); grpc_uri_destroy(uri); 
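The resolver-test hunks around this point show a second thread of the same refactor: grpc_resolver_factory_create_resolver() now takes the grpc_exec_ctx* as its first argument, and the closure handed to grpc_resolver_next() is built with an explicit scheduler. A sketch of the updated creation path, following the create_resolver() shape in the dns_resolver_connectivity_test.c hunk above; the helper name is illustrative and the surrounding test files' includes are assumed.

static grpc_resolver *make_dns_resolver(grpc_exec_ctx *exec_ctx,
                                        const char *name) {
  grpc_resolver_factory *factory = grpc_resolver_factory_lookup("dns");
  grpc_uri *uri = grpc_uri_parse(name, 0);
  GPR_ASSERT(uri);
  grpc_resolver_args args;
  memset(&args, 0, sizeof(args));
  args.uri = uri;
  /* exec_ctx is now threaded through resolver construction */
  grpc_resolver *resolver =
      grpc_resolver_factory_create_resolver(exec_ctx, factory, &args);
  grpc_resolver_factory_unref(factory);
  grpc_uri_destroy(uri);
  return resolver;
}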
grpc_exec_ctx_finish(&exec_ctx); diff --git a/test/core/client_channel/set_initial_connect_string_test.c b/test/core/client_channel/set_initial_connect_string_test.c index b16a3ebf45..2082f65458 100644 --- a/test/core/client_channel/set_initial_connect_string_test.c +++ b/test/core/client_channel/set_initial_connect_string_test.c @@ -92,8 +92,9 @@ static void handle_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, grpc_pollset *accepting_pollset, grpc_tcp_server_acceptor *acceptor) { + gpr_free(acceptor); test_tcp_server *server = arg; - grpc_closure_init(&on_read, handle_read, NULL); + grpc_closure_init(&on_read, handle_read, NULL, grpc_schedule_on_exec_ctx); grpc_slice_buffer_init(&state.incoming_buffer); grpc_slice_buffer_init(&state.temp_incoming_buffer); state.tcp = tcp; diff --git a/test/core/end2end/bad_server_response_test.c b/test/core/end2end/bad_server_response_test.c index 1c4a17fda8..f6a9cbeef9 100644 --- a/test/core/end2end/bad_server_response_test.c +++ b/test/core/end2end/bad_server_response_test.c @@ -145,9 +145,10 @@ static void handle_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, grpc_pollset *accepting_pollset, grpc_tcp_server_acceptor *acceptor) { + gpr_free(acceptor); test_tcp_server *server = arg; - grpc_closure_init(&on_read, handle_read, NULL); - grpc_closure_init(&on_write, done_write, NULL); + grpc_closure_init(&on_read, handle_read, NULL, grpc_schedule_on_exec_ctx); + grpc_closure_init(&on_write, done_write, NULL, grpc_schedule_on_exec_ctx); grpc_slice_buffer_init(&state.temp_incoming_buffer); grpc_slice_buffer_init(&state.outgoing_buffer); state.tcp = tcp; diff --git a/test/core/end2end/fake_resolver.c b/test/core/end2end/fake_resolver.c index 865b55de4d..45d48720c6 100644 --- a/test/core/end2end/fake_resolver.c +++ b/test/core/end2end/fake_resolver.c @@ -87,7 +87,7 @@ static void fake_resolver_shutdown(grpc_exec_ctx* exec_ctx, gpr_mu_lock(&r->mu); if (r->next_completion != NULL) { *r->target_result = NULL; - grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE); r->next_completion = NULL; } gpr_mu_unlock(&r->mu); @@ -100,7 +100,7 @@ static void fake_resolver_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx, grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses); *r->target_result = grpc_channel_args_copy_and_add(r->channel_args, &arg, 1); - grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE); r->next_completion = NULL; } } @@ -140,7 +140,8 @@ static void fake_resolver_factory_unref(grpc_resolver_factory* factory) {} static void do_nothing(void* ignored) {} -static grpc_resolver* fake_resolver_create(grpc_resolver_factory* factory, +static grpc_resolver* fake_resolver_create(grpc_exec_ctx* exec_ctx, + grpc_resolver_factory* factory, grpc_resolver_args* args) { if (0 != strcmp(args->uri->authority, "")) { gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme", @@ -181,12 +182,7 @@ static grpc_resolver* fake_resolver_create(grpc_resolver_factory* factory, // Instantiate resolver. 
fake_resolver* r = gpr_malloc(sizeof(fake_resolver)); memset(r, 0, sizeof(*r)); - grpc_arg server_name_arg; - server_name_arg.type = GRPC_ARG_STRING; - server_name_arg.key = GRPC_ARG_SERVER_NAME; - server_name_arg.value.string = args->uri->path; - r->channel_args = - grpc_channel_args_copy_and_add(args->args, &server_name_arg, 1); + r->channel_args = grpc_channel_args_copy(args->args); r->addresses = addresses; gpr_mu_init(&r->mu); grpc_resolver_init(&r->base, &fake_resolver_vtable); diff --git a/test/core/end2end/fixtures/http_proxy.c b/test/core/end2end/fixtures/http_proxy.c index 57fc4a38f8..ca7d9e9f9a 100644 --- a/test/core/end2end/fixtures/http_proxy.c +++ b/test/core/end2end/fixtures/http_proxy.c @@ -367,6 +367,7 @@ static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg, static void on_accept(grpc_exec_ctx* exec_ctx, void* arg, grpc_endpoint* endpoint, grpc_pollset* accepting_pollset, grpc_tcp_server_acceptor* acceptor) { + gpr_free(acceptor); grpc_end2end_http_proxy* proxy = arg; // Instantiate proxy_connection. proxy_connection* conn = gpr_malloc(sizeof(*conn)); @@ -375,15 +376,20 @@ static void on_accept(grpc_exec_ctx* exec_ctx, void* arg, gpr_ref_init(&conn->refcount, 1); conn->pollset_set = grpc_pollset_set_create(); grpc_pollset_set_add_pollset(exec_ctx, conn->pollset_set, proxy->pollset); - grpc_closure_init(&conn->on_read_request_done, on_read_request_done, conn); - grpc_closure_init(&conn->on_server_connect_done, on_server_connect_done, - conn); - grpc_closure_init(&conn->on_write_response_done, on_write_response_done, - conn); - grpc_closure_init(&conn->on_client_read_done, on_client_read_done, conn); - grpc_closure_init(&conn->on_client_write_done, on_client_write_done, conn); - grpc_closure_init(&conn->on_server_read_done, on_server_read_done, conn); - grpc_closure_init(&conn->on_server_write_done, on_server_write_done, conn); + grpc_closure_init(&conn->on_read_request_done, on_read_request_done, conn, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&conn->on_server_connect_done, on_server_connect_done, conn, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&conn->on_write_response_done, on_write_response_done, conn, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&conn->on_client_read_done, on_client_read_done, conn, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&conn->on_client_write_done, on_client_write_done, conn, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&conn->on_server_read_done, on_server_read_done, conn, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&conn->on_server_write_done, on_server_write_done, conn, + grpc_schedule_on_exec_ctx); grpc_slice_buffer_init(&conn->client_read_buffer); grpc_slice_buffer_init(&conn->client_deferred_write_buffer); grpc_slice_buffer_init(&conn->client_write_buffer); @@ -470,7 +476,8 @@ void grpc_end2end_http_proxy_destroy(grpc_end2end_http_proxy* proxy) { gpr_free(proxy->proxy_name); grpc_channel_args_destroy(proxy->channel_args); grpc_closure destroyed; - grpc_closure_init(&destroyed, destroy_pollset, proxy->pollset); + grpc_closure_init(&destroyed, destroy_pollset, proxy->pollset, + grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, proxy->pollset, &destroyed); gpr_free(proxy); grpc_exec_ctx_finish(&exec_ctx); diff --git a/test/core/end2end/fuzzers/api_fuzzer.c b/test/core/end2end/fuzzers/api_fuzzer.c index 19ac6ced14..8136f9312c 100644 --- a/test/core/end2end/fuzzers/api_fuzzer.c +++ b/test/core/end2end/fuzzers/api_fuzzer.c @@ -349,11 +349,11 @@ static void finish_resolve(grpc_exec_ctx 
*exec_ctx, void *arg, addrs->addrs = gpr_malloc(sizeof(*addrs->addrs)); addrs->addrs[0].len = 0; *r->addrs = addrs; - grpc_exec_ctx_sched(exec_ctx, r->on_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, r->on_done, GRPC_ERROR_NONE); } else { - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, r->on_done, - GRPC_ERROR_CREATE_REFERENCING("Resolution failed", &error, 1), NULL); + GRPC_ERROR_CREATE_REFERENCING("Resolution failed", &error, 1)); } gpr_free(r->addr); @@ -361,7 +361,9 @@ static void finish_resolve(grpc_exec_ctx *exec_ctx, void *arg, } void my_resolve_address(grpc_exec_ctx *exec_ctx, const char *addr, - const char *default_port, grpc_closure *on_done, + const char *default_port, + grpc_pollset_set *interested_parties, + grpc_closure *on_done, grpc_resolved_addresses **addresses) { addr_req *r = gpr_malloc(sizeof(*r)); r->addr = gpr_strdup(addr); @@ -396,7 +398,7 @@ static void do_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { future_connect *fc = arg; if (error != GRPC_ERROR_NONE) { *fc->ep = NULL; - grpc_exec_ctx_sched(exec_ctx, fc->closure, GRPC_ERROR_REF(error), NULL); + grpc_closure_sched(exec_ctx, fc->closure, GRPC_ERROR_REF(error)); } else if (g_server != NULL) { grpc_endpoint *client; grpc_endpoint *server; @@ -408,7 +410,7 @@ static void do_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_server_setup_transport(exec_ctx, g_server, transport, NULL, NULL); grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL); - grpc_exec_ctx_sched(exec_ctx, fc->closure, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, fc->closure, GRPC_ERROR_NONE); } else { sched_connect(exec_ctx, fc->closure, fc->ep, fc->deadline); } @@ -419,8 +421,8 @@ static void sched_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, gpr_timespec deadline) { if (gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) < 0) { *ep = NULL; - grpc_exec_ctx_sched(exec_ctx, closure, - GRPC_ERROR_CREATE("Connect deadline exceeded"), NULL); + grpc_closure_sched(exec_ctx, closure, + GRPC_ERROR_CREATE("Connect deadline exceeded")); return; } diff --git a/test/core/end2end/invalid_call_argument_test.c b/test/core/end2end/invalid_call_argument_test.c index 765b6ad1be..d974d2c8ff 100644 --- a/test/core/end2end/invalid_call_argument_test.c +++ b/test/core/end2end/invalid_call_argument_test.c @@ -573,6 +573,29 @@ static void test_recv_close_on_server_twice() { cleanup_test(); } +static void test_invalid_initial_metadata_reserved_key() { + gpr_log(GPR_INFO, "test_invalid_initial_metadata_reserved_key"); + + grpc_metadata metadata; + metadata.key = ":start_with_colon"; + metadata.value = "value"; + metadata.value_length = 6; + + grpc_op *op; + prepare_test(1); + op = g_state.ops; + op->op = GRPC_OP_SEND_INITIAL_METADATA; + op->data.send_initial_metadata.count = 1; + op->data.send_initial_metadata.metadata = &metadata; + op->flags = 0; + op->reserved = NULL; + op++; + GPR_ASSERT(GRPC_CALL_ERROR_INVALID_METADATA == + grpc_call_start_batch(g_state.call, g_state.ops, + (size_t)(op - g_state.ops), tag(1), NULL)); + cleanup_test(); +} + int main(int argc, char **argv) { grpc_test_init(argc, argv); grpc_init(); @@ -595,6 +618,7 @@ int main(int argc, char **argv) { test_send_server_status_twice(); test_recv_close_on_server_with_invalid_flags(); test_recv_close_on_server_twice(); + test_invalid_initial_metadata_reserved_key(); grpc_shutdown(); return 0; diff --git a/test/core/end2end/tests/filter_call_init_fails.c 
b/test/core/end2end/tests/filter_call_init_fails.c index 41ae575fff..6d9351ed8c 100644 --- a/test/core/end2end/tests/filter_call_init_fails.c +++ b/test/core/end2end/tests/filter_call_init_fails.c @@ -216,9 +216,11 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, void *and_free_memory) {} -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) {} +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { + return GRPC_ERROR_NONE; +} static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {} diff --git a/test/core/end2end/tests/filter_causes_close.c b/test/core/end2end/tests/filter_causes_close.c index bf9fd9073d..7a7129ceb1 100644 --- a/test/core/end2end/tests/filter_causes_close.c +++ b/test/core/end2end/tests/filter_causes_close.c @@ -217,9 +217,9 @@ static void recv_im_ready(grpc_exec_ctx *exec_ctx, void *arg, &message); grpc_call_next_op(exec_ctx, elem, op); } - grpc_exec_ctx_sched( + grpc_closure_sched( exec_ctx, calld->recv_im_ready, - GRPC_ERROR_CREATE_REFERENCING("Forced call to close", &error, 1), NULL); + GRPC_ERROR_CREATE_REFERENCING("Forced call to close", &error, 1)); } static void start_transport_stream_op(grpc_exec_ctx *exec_ctx, @@ -228,7 +228,8 @@ static void start_transport_stream_op(grpc_exec_ctx *exec_ctx, call_data *calld = elem->call_data; if (op->recv_initial_metadata != NULL) { calld->recv_im_ready = op->recv_initial_metadata_ready; - op->recv_initial_metadata_ready = grpc_closure_create(recv_im_ready, elem); + op->recv_initial_metadata_ready = + grpc_closure_create(recv_im_ready, elem, grpc_schedule_on_exec_ctx); } grpc_call_next_op(exec_ctx, elem, op); } @@ -243,9 +244,11 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, void *and_free_memory) {} -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) {} +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { + return GRPC_ERROR_NONE; +} static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {} diff --git a/test/core/end2end/tests/filter_latency.c b/test/core/end2end/tests/filter_latency.c index ea63d45420..e10204863b 100644 --- a/test/core/end2end/tests/filter_latency.c +++ b/test/core/end2end/tests/filter_latency.c @@ -281,9 +281,11 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx, gpr_mu_unlock(&g_mu); } -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) {} +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { + return GRPC_ERROR_NONE; +} static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {} diff --git a/test/core/http/httpcli_test.c b/test/core/http/httpcli_test.c index 3e312c1dde..4f00cad205 100644 --- a/test/core/http/httpcli_test.c +++ b/test/core/http/httpcli_test.c @@ -90,9 +90,10 @@ static void test_get(int port) { grpc_http_response response; memset(&response, 0, sizeof(response)); grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_get"); - grpc_httpcli_get(&exec_ctx, &g_context, &g_pops, resource_quota, &req, - 
n_seconds_time(15), - grpc_closure_create(on_finish, &response), &response); + grpc_httpcli_get( + &exec_ctx, &g_context, &g_pops, resource_quota, &req, n_seconds_time(15), + grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx), + &response); grpc_resource_quota_internal_unref(&exec_ctx, resource_quota); gpr_mu_lock(g_mu); while (!g_done) { @@ -130,9 +131,11 @@ static void test_post(int port) { grpc_http_response response; memset(&response, 0, sizeof(response)); grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post"); - grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req, - "hello", 5, n_seconds_time(15), - grpc_closure_create(on_finish, &response), &response); + grpc_httpcli_post( + &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5, + n_seconds_time(15), + grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx), + &response); grpc_resource_quota_internal_unref(&exec_ctx, resource_quota); gpr_mu_lock(g_mu); while (!g_done) { @@ -207,7 +210,8 @@ int main(int argc, char **argv) { test_post(port); grpc_httpcli_context_destroy(&g_context); - grpc_closure_init(&destroyed, destroy_pops, &g_pops); + grpc_closure_init(&destroyed, destroy_pops, &g_pops, + grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops), &destroyed); grpc_exec_ctx_finish(&exec_ctx); diff --git a/test/core/http/httpscli_test.c b/test/core/http/httpscli_test.c index d06035149e..53b26b645f 100644 --- a/test/core/http/httpscli_test.c +++ b/test/core/http/httpscli_test.c @@ -91,9 +91,10 @@ static void test_get(int port) { grpc_http_response response; memset(&response, 0, sizeof(response)); grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_get"); - grpc_httpcli_get(&exec_ctx, &g_context, &g_pops, resource_quota, &req, - n_seconds_time(15), - grpc_closure_create(on_finish, &response), &response); + grpc_httpcli_get( + &exec_ctx, &g_context, &g_pops, resource_quota, &req, n_seconds_time(15), + grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx), + &response); grpc_resource_quota_internal_unref(&exec_ctx, resource_quota); gpr_mu_lock(g_mu); while (!g_done) { @@ -132,9 +133,11 @@ static void test_post(int port) { grpc_http_response response; memset(&response, 0, sizeof(response)); grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post"); - grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req, - "hello", 5, n_seconds_time(15), - grpc_closure_create(on_finish, &response), &response); + grpc_httpcli_post( + &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5, + n_seconds_time(15), + grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx), + &response); grpc_resource_quota_internal_unref(&exec_ctx, resource_quota); gpr_mu_lock(g_mu); while (!g_done) { @@ -210,7 +213,8 @@ int main(int argc, char **argv) { test_post(port); grpc_httpcli_context_destroy(&g_context); - grpc_closure_init(&destroyed, destroy_pops, &g_pops); + grpc_closure_init(&destroyed, destroy_pops, &g_pops, + grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops), &destroyed); grpc_exec_ctx_finish(&exec_ctx); diff --git a/test/core/internal_api_canaries/iomgr.c b/test/core/internal_api_canaries/iomgr.c index de03c47c13..773ef602b2 100644 --- a/test/core/internal_api_canaries/iomgr.c +++ b/test/core/internal_api_canaries/iomgr.c @@ -60,9 +60,9 @@ static void test_code(void) { closure_list.head = NULL; 
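A separate contract change runs through the filter tests above (filter_call_init_fails, filter_causes_close, filter_latency) and channel_stack_test: a channel filter's init_channel_elem now returns grpc_error* instead of void, so element initialization can report failure. A minimal conforming initializer, assuming only the types already used in those hunks:

static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
                                     grpc_channel_element *elem,
                                     grpc_channel_element_args *args) {
  /* GRPC_ERROR_NONE preserves the old always-succeeds behavior; returning a
     real error (e.g. GRPC_ERROR_CREATE("init failed")) presumably lets the
     channel stack surface the failure instead of continuing silently. */
  return GRPC_ERROR_NONE;
}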
closure_list.tail = NULL; - grpc_closure_init(&closure, NULL, NULL); + grpc_closure_init(&closure, NULL, NULL, grpc_schedule_on_exec_ctx); - grpc_closure_create(NULL, NULL); + grpc_closure_create(NULL, NULL, grpc_schedule_on_exec_ctx); grpc_closure_list_move(NULL, NULL); grpc_closure_list_append(NULL, NULL, GRPC_ERROR_CREATE("Foo")); @@ -72,8 +72,8 @@ static void test_code(void) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_exec_ctx_flush(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx); - grpc_exec_ctx_sched(&exec_ctx, &closure, GRPC_ERROR_CREATE("Foo"), NULL); - grpc_exec_ctx_enqueue_list(&exec_ctx, &closure_list, NULL); + grpc_closure_sched(&exec_ctx, &closure, GRPC_ERROR_CREATE("Foo")); + grpc_closure_list_sched(&exec_ctx, &closure_list); /* endpoint.h */ grpc_endpoint endpoint; @@ -99,7 +99,6 @@ static void test_code(void) { /* executor.h */ grpc_executor_init(); - grpc_executor_push(&closure, GRPC_ERROR_CREATE("Phi")); grpc_executor_shutdown(); /* pollset.h */ diff --git a/test/core/iomgr/combiner_test.c b/test/core/iomgr/combiner_test.c index f7d5809be7..9b6d6ff9b4 100644 --- a/test/core/iomgr/combiner_test.c +++ b/test/core/iomgr/combiner_test.c @@ -59,9 +59,10 @@ static void test_execute_one(void) { grpc_combiner *lock = grpc_combiner_create(NULL); bool done = false; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_combiner_execute(&exec_ctx, lock, - grpc_closure_create(set_bool_to_true, &done), - GRPC_ERROR_NONE, false); + grpc_closure_sched(&exec_ctx, + grpc_closure_create(set_bool_to_true, &done, + grpc_combiner_scheduler(lock, false)), + GRPC_ERROR_NONE); grpc_exec_ctx_flush(&exec_ctx); GPR_ASSERT(done); grpc_combiner_destroy(&exec_ctx, lock); @@ -94,9 +95,10 @@ static void execute_many_loop(void *a) { ex_args *c = gpr_malloc(sizeof(*c)); c->ctr = &args->ctr; c->value = n++; - grpc_combiner_execute(&exec_ctx, args->lock, - grpc_closure_create(check_one, c), GRPC_ERROR_NONE, - false); + grpc_closure_sched( + &exec_ctx, grpc_closure_create(check_one, c, grpc_combiner_scheduler( + args->lock, false)), + GRPC_ERROR_NONE); grpc_exec_ctx_flush(&exec_ctx); } // sleep for a little bit, to test a combiner draining and another thread @@ -134,9 +136,10 @@ static void in_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { } static void add_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_combiner_execute_finally(exec_ctx, arg, - grpc_closure_create(in_finally, NULL), - GRPC_ERROR_NONE, false); + grpc_closure_sched(exec_ctx, grpc_closure_create( + in_finally, NULL, + grpc_combiner_finally_scheduler(arg, false)), + GRPC_ERROR_NONE); } static void test_execute_finally(void) { @@ -144,8 +147,10 @@ static void test_execute_finally(void) { grpc_combiner *lock = grpc_combiner_create(NULL); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_combiner_execute(&exec_ctx, lock, grpc_closure_create(add_finally, lock), - GRPC_ERROR_NONE, false); + grpc_closure_sched(&exec_ctx, + grpc_closure_create(add_finally, lock, + grpc_combiner_scheduler(lock, false)), + GRPC_ERROR_NONE); grpc_exec_ctx_flush(&exec_ctx); GPR_ASSERT(got_in_finally); grpc_combiner_destroy(&exec_ctx, lock); diff --git a/test/core/iomgr/endpoint_pair_test.c b/test/core/iomgr/endpoint_pair_test.c index 2a257a7cea..f02171f2ef 100644 --- a/test/core/iomgr/endpoint_pair_test.c +++ b/test/core/iomgr/endpoint_pair_test.c @@ -81,7 +81,8 @@ int main(int argc, char **argv) { g_pollset = gpr_malloc(grpc_pollset_size()); grpc_pollset_init(g_pollset, &g_mu); grpc_endpoint_tests(configs[0], g_pollset, g_mu); 
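combiner_test.c above extends the same unification to combiners: rather than dedicated grpc_combiner_execute()/grpc_combiner_execute_finally() entry points, a closure is created with grpc_combiner_scheduler() (or grpc_combiner_finally_scheduler()) and submitted through the ordinary grpc_closure_sched(). A sketch of one serialized callback, using only the calls visible in that hunk:

static void set_flag(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  *(bool *)arg = true; /* runs under the combiner's serialization guarantee */
}

static void combiner_example(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_combiner *lock = grpc_combiner_create(NULL);
  bool done = false;
  /* the combiner is now just another scheduler attached to the closure */
  grpc_closure_sched(&exec_ctx,
                     grpc_closure_create(set_flag, &done,
                                         grpc_combiner_scheduler(lock, false)),
                     GRPC_ERROR_NONE);
  grpc_exec_ctx_flush(&exec_ctx);
  GPR_ASSERT(done);
  grpc_combiner_destroy(&exec_ctx, lock);
  grpc_exec_ctx_finish(&exec_ctx);
}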
- grpc_closure_init(&destroyed, destroy_pollset, g_pollset); + grpc_closure_init(&destroyed, destroy_pollset, g_pollset, + grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed); grpc_exec_ctx_finish(&exec_ctx); grpc_shutdown(); diff --git a/test/core/iomgr/endpoint_tests.c b/test/core/iomgr/endpoint_tests.c index 8186ea7e85..87a9d79e9b 100644 --- a/test/core/iomgr/endpoint_tests.c +++ b/test/core/iomgr/endpoint_tests.c @@ -211,9 +211,10 @@ static void read_and_write_test(grpc_endpoint_test_config config, state.write_done = 0; state.current_read_data = 0; state.current_write_data = 0; - grpc_closure_init(&state.done_read, read_and_write_test_read_handler, &state); + grpc_closure_init(&state.done_read, read_and_write_test_read_handler, &state, + grpc_schedule_on_exec_ctx); grpc_closure_init(&state.done_write, read_and_write_test_write_handler, - &state); + &state, grpc_schedule_on_exec_ctx); grpc_slice_buffer_init(&state.outgoing); grpc_slice_buffer_init(&state.incoming); @@ -290,16 +291,19 @@ static void multiple_shutdown_test(grpc_endpoint_test_config config) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset); grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer, - grpc_closure_create(inc_on_failure, &fail_count)); + grpc_closure_create(inc_on_failure, &fail_count, + grpc_schedule_on_exec_ctx)); wait_for_fail_count(&exec_ctx, &fail_count, 0); grpc_endpoint_shutdown(&exec_ctx, f.client_ep); wait_for_fail_count(&exec_ctx, &fail_count, 1); grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer, - grpc_closure_create(inc_on_failure, &fail_count)); + grpc_closure_create(inc_on_failure, &fail_count, + grpc_schedule_on_exec_ctx)); wait_for_fail_count(&exec_ctx, &fail_count, 2); grpc_slice_buffer_add(&slice_buffer, grpc_slice_from_copied_string("a")); grpc_endpoint_write(&exec_ctx, f.client_ep, &slice_buffer, - grpc_closure_create(inc_on_failure, &fail_count)); + grpc_closure_create(inc_on_failure, &fail_count, + grpc_schedule_on_exec_ctx)); wait_for_fail_count(&exec_ctx, &fail_count, 3); grpc_endpoint_shutdown(&exec_ctx, f.client_ep); wait_for_fail_count(&exec_ctx, &fail_count, 3); diff --git a/test/core/iomgr/ev_epoll_linux_test.c b/test/core/iomgr/ev_epoll_linux_test.c index 564b05d7f4..5bce9801a5 100644 --- a/test/core/iomgr/ev_epoll_linux_test.c +++ b/test/core/iomgr/ev_epoll_linux_test.c @@ -102,7 +102,8 @@ static void test_pollset_cleanup(grpc_exec_ctx *exec_ctx, int i; for (i = 0; i < num_pollsets; i++) { - grpc_closure_init(&destroyed, destroy_pollset, pollsets[i].pollset); + grpc_closure_init(&destroyed, destroy_pollset, pollsets[i].pollset, + grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(exec_ctx, pollsets[i].pollset, &destroyed); grpc_exec_ctx_flush(exec_ctx); diff --git a/test/core/iomgr/fd_posix_test.c b/test/core/iomgr/fd_posix_test.c index 6166699fe6..4dd476526d 100644 --- a/test/core/iomgr/fd_posix_test.c +++ b/test/core/iomgr/fd_posix_test.c @@ -219,8 +219,8 @@ static void listen_cb(grpc_exec_ctx *exec_ctx, void *arg, /*=sv_arg*/ se->sv = sv; se->em_fd = grpc_fd_create(fd, "listener"); grpc_pollset_add_fd(exec_ctx, g_pollset, se->em_fd); - se->session_read_closure.cb = session_read_cb; - se->session_read_closure.cb_arg = se; + grpc_closure_init(&se->session_read_closure, session_read_cb, se, + grpc_schedule_on_exec_ctx); grpc_fd_notify_on_read(exec_ctx, se->em_fd, &se->session_read_closure); grpc_fd_notify_on_read(exec_ctx, listen_em_fd, &sv->listen_closure); @@ -249,8 +249,8 @@ 
static int server_start(grpc_exec_ctx *exec_ctx, server *sv) { sv->em_fd = grpc_fd_create(fd, "server"); grpc_pollset_add_fd(exec_ctx, g_pollset, sv->em_fd); /* Register to be interested in reading from listen_fd. */ - sv->listen_closure.cb = listen_cb; - sv->listen_closure.cb_arg = sv; + grpc_closure_init(&sv->listen_closure, listen_cb, sv, + grpc_schedule_on_exec_ctx); grpc_fd_notify_on_read(exec_ctx, sv->em_fd, &sv->listen_closure); return port; @@ -333,8 +333,8 @@ static void client_session_write(grpc_exec_ctx *exec_ctx, void *arg, /*client */ if (errno == EAGAIN) { gpr_mu_lock(g_mu); if (cl->client_write_cnt < CLIENT_TOTAL_WRITE_CNT) { - cl->write_closure.cb = client_session_write; - cl->write_closure.cb_arg = cl; + grpc_closure_init(&cl->write_closure, client_session_write, cl, + grpc_schedule_on_exec_ctx); grpc_fd_notify_on_write(exec_ctx, cl->em_fd, &cl->write_closure); cl->client_write_cnt++; } else { @@ -459,10 +459,10 @@ static void test_grpc_fd_change(void) { grpc_closure second_closure; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - first_closure.cb = first_read_callback; - first_closure.cb_arg = &a; - second_closure.cb = second_read_callback; - second_closure.cb_arg = &b; + grpc_closure_init(&first_closure, first_read_callback, &a, + grpc_schedule_on_exec_ctx); + grpc_closure_init(&second_closure, second_read_callback, &b, + grpc_schedule_on_exec_ctx); init_change_data(&a); init_change_data(&b); @@ -546,7 +546,8 @@ int main(int argc, char **argv) { grpc_pollset_init(g_pollset, &g_mu); test_grpc_fd(); test_grpc_fd_change(); - grpc_closure_init(&destroyed, destroy_pollset, g_pollset); + grpc_closure_init(&destroyed, destroy_pollset, g_pollset, + grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed); grpc_exec_ctx_finish(&exec_ctx); gpr_free(g_pollset); diff --git a/test/core/iomgr/resolve_address_test.c b/test/core/iomgr/resolve_address_test.c index 2dd0d88b3f..d844e6eceb 100644 --- a/test/core/iomgr/resolve_address_test.c +++ b/test/core/iomgr/resolve_address_test.c @@ -32,8 +32,10 @@ */ #include "src/core/lib/iomgr/resolve_address.h" +#include <grpc/support/alloc.h> #include <grpc/support/log.h> #include <grpc/support/sync.h> +#include <grpc/support/thd.h> #include <grpc/support/time.h> #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/iomgr.h" @@ -46,16 +48,73 @@ static gpr_timespec test_deadline(void) { typedef struct args_struct { gpr_event ev; grpc_resolved_addresses *addrs; + gpr_atm done_atm; + gpr_mu *mu; + grpc_pollset *pollset; + grpc_pollset_set *pollset_set; } args_struct; -void args_init(args_struct *args) { +static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {} + +void args_init(grpc_exec_ctx *exec_ctx, args_struct *args) { gpr_event_init(&args->ev); + args->pollset = gpr_malloc(grpc_pollset_size()); + grpc_pollset_init(args->pollset, &args->mu); + args->pollset_set = grpc_pollset_set_create(); + grpc_pollset_set_add_pollset(exec_ctx, args->pollset_set, args->pollset); args->addrs = NULL; } -void args_finish(args_struct *args) { +void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) { GPR_ASSERT(gpr_event_wait(&args->ev, test_deadline())); grpc_resolved_addresses_destroy(args->addrs); + grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset); + grpc_pollset_set_destroy(args->pollset_set); + grpc_closure do_nothing_cb; + grpc_closure_init(&do_nothing_cb, do_nothing, NULL, + grpc_schedule_on_exec_ctx); + grpc_pollset_shutdown(exec_ctx, args->pollset, 
&do_nothing_cb); + // exec_ctx needs to be flushed before calling grpc_pollset_destroy() + grpc_exec_ctx_flush(exec_ctx); + grpc_pollset_destroy(args->pollset); + gpr_free(args->pollset); +} + +static gpr_timespec n_sec_deadline(int seconds) { + return gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_seconds(seconds, GPR_TIMESPAN)); +} + +static void actually_poll(void *argsp) { + args_struct *args = argsp; + gpr_timespec deadline = n_sec_deadline(10); + while (true) { + bool done = gpr_atm_acq_load(&args->done_atm) != 0; + if (done) { + break; + } + gpr_timespec time_left = + gpr_time_sub(deadline, gpr_now(GPR_CLOCK_REALTIME)); + gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64 ".%09d", done, + time_left.tv_sec, time_left.tv_nsec); + GPR_ASSERT(gpr_time_cmp(time_left, gpr_time_0(GPR_TIMESPAN)) >= 0); + grpc_pollset_worker *worker = NULL; + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + gpr_mu_lock(args->mu); + GRPC_LOG_IF_ERROR( + "pollset_work", + grpc_pollset_work(&exec_ctx, args->pollset, &worker, + gpr_now(GPR_CLOCK_REALTIME), n_sec_deadline(1))); + gpr_mu_unlock(args->mu); + grpc_exec_ctx_finish(&exec_ctx); + } + gpr_event_set(&args->ev, (void *)1); +} + +static void poll_pollset_until_request_done(args_struct *args) { + gpr_atm_rel_store(&args->done_atm, 0); + gpr_thd_id id; + gpr_thd_new(&id, actually_poll, args, NULL); } static void must_succeed(grpc_exec_ctx *exec_ctx, void *argsp, @@ -64,53 +123,65 @@ static void must_succeed(grpc_exec_ctx *exec_ctx, void *argsp, GPR_ASSERT(err == GRPC_ERROR_NONE); GPR_ASSERT(args->addrs != NULL); GPR_ASSERT(args->addrs->naddrs > 0); - gpr_event_set(&args->ev, (void *)1); + gpr_atm_rel_store(&args->done_atm, 1); } static void must_fail(grpc_exec_ctx *exec_ctx, void *argsp, grpc_error *err) { args_struct *args = argsp; GPR_ASSERT(err != GRPC_ERROR_NONE); - gpr_event_set(&args->ev, (void *)1); + gpr_atm_rel_store(&args->done_atm, 1); } static void test_localhost(void) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resolve_address(&exec_ctx, "localhost:1", NULL, - grpc_closure_create(must_succeed, &args), &args.addrs); + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address( + &exec_ctx, "localhost:1", NULL, args.pollset_set, + grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx), + &args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } static void test_default_port(void) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resolve_address(&exec_ctx, "localhost", "1", - grpc_closure_create(must_succeed, &args), &args.addrs); + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address( + &exec_ctx, "localhost", "1", args.pollset_set, + grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx), + &args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } static void test_missing_default_port(void) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resolve_address(&exec_ctx, "localhost", NULL, - grpc_closure_create(must_fail, &args), &args.addrs); + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address( + &exec_ctx, "localhost", NULL, args.pollset_set, + grpc_closure_create(must_fail, &args, grpc_schedule_on_exec_ctx), + 
&args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } static void test_ipv6_with_port(void) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resolve_address(&exec_ctx, "[2001:db8::1]:1", NULL, - grpc_closure_create(must_succeed, &args), &args.addrs); + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address( + &exec_ctx, "[2001:db8::1]:1", NULL, args.pollset_set, + grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx), + &args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } static void test_ipv6_without_port(void) { @@ -119,13 +190,16 @@ static void test_ipv6_without_port(void) { }; unsigned i; for (i = 0; i < sizeof(kCases) / sizeof(*kCases); i++) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resolve_address(&exec_ctx, kCases[i], "80", - grpc_closure_create(must_succeed, &args), &args.addrs); + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address( + &exec_ctx, kCases[i], "80", args.pollset_set, + grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx), + &args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } } @@ -135,13 +209,16 @@ static void test_invalid_ip_addresses(void) { }; unsigned i; for (i = 0; i < sizeof(kCases) / sizeof(*kCases); i++) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resolve_address(&exec_ctx, kCases[i], NULL, - grpc_closure_create(must_fail, &args), &args.addrs); + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address( + &exec_ctx, kCases[i], NULL, args.pollset_set, + grpc_closure_create(must_fail, &args, grpc_schedule_on_exec_ctx), + &args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } } @@ -151,13 +228,16 @@ static void test_unparseable_hostports(void) { }; unsigned i; for (i = 0; i < sizeof(kCases) / sizeof(*kCases); i++) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resolve_address(&exec_ctx, kCases[i], "1", - grpc_closure_create(must_fail, &args), &args.addrs); + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address( + &exec_ctx, kCases[i], "1", args.pollset_set, + grpc_closure_create(must_fail, &args, grpc_schedule_on_exec_ctx), + &args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } } diff --git a/test/core/iomgr/resource_quota_test.c b/test/core/iomgr/resource_quota_test.c index a82d44f7f8..181776341f 100644 --- a/test/core/iomgr/resource_quota_test.c +++ b/test/core/iomgr/resource_quota_test.c @@ -45,7 +45,9 @@ static void inc_int_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) { static void set_bool_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) { *(bool *)a = true; } -grpc_closure *set_bool(bool *p) { return grpc_closure_create(set_bool_cb, p); } +grpc_closure *set_bool(bool *p) { + return grpc_closure_create(set_bool_cb, p, grpc_schedule_on_exec_ctx); +} typedef struct { size_t size; @@ -67,7 +69,7 @@ grpc_closure *make_reclaimer(grpc_resource_user *resource_user, size_t size, a->size = size; a->resource_user = resource_user; a->then = then; - 
return grpc_closure_create(reclaimer_cb, a); + return grpc_closure_create(reclaimer_cb, a, grpc_schedule_on_exec_ctx); } static void unused_reclaimer_cb(grpc_exec_ctx *exec_ctx, void *arg, @@ -76,7 +78,8 @@ static void unused_reclaimer_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_closure_run(exec_ctx, arg, GRPC_ERROR_NONE); } grpc_closure *make_unused_reclaimer(grpc_closure *then) { - return grpc_closure_create(unused_reclaimer_cb, then); + return grpc_closure_create(unused_reclaimer_cb, then, + grpc_schedule_on_exec_ctx); } static void destroy_user(grpc_resource_user *usr) { diff --git a/test/core/iomgr/tcp_client_posix_test.c b/test/core/iomgr/tcp_client_posix_test.c index 5fab826fb7..0ea7a000eb 100644 --- a/test/core/iomgr/tcp_client_posix_test.c +++ b/test/core/iomgr/tcp_client_posix_test.c @@ -113,7 +113,7 @@ void test_succeeds(void) { /* connect to it */ GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)addr, (socklen_t *)&resolved_addr.len) == 0); - grpc_closure_init(&done, must_succeed, NULL); + grpc_closure_init(&done, must_succeed, NULL, grpc_schedule_on_exec_ctx); grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL, &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME)); @@ -163,7 +163,7 @@ void test_fails(void) { gpr_mu_unlock(g_mu); /* connect to a broken address */ - grpc_closure_init(&done, must_fail, NULL); + grpc_closure_init(&done, must_fail, NULL, grpc_schedule_on_exec_ctx); grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL, &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME)); @@ -207,7 +207,8 @@ int main(int argc, char **argv) { gpr_log(GPR_ERROR, "End of first test"); test_fails(); grpc_pollset_set_destroy(g_pollset_set); - grpc_closure_init(&destroyed, destroy_pollset, g_pollset); + grpc_closure_init(&destroyed, destroy_pollset, g_pollset, + grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed); grpc_exec_ctx_finish(&exec_ctx); grpc_shutdown(); diff --git a/test/core/iomgr/tcp_posix_test.c b/test/core/iomgr/tcp_posix_test.c index 5eafa570bb..c646e61de0 100644 --- a/test/core/iomgr/tcp_posix_test.c +++ b/test/core/iomgr/tcp_posix_test.c @@ -194,7 +194,7 @@ static void read_test(size_t num_bytes, size_t slice_size) { state.read_bytes = 0; state.target_read_bytes = written_bytes; grpc_slice_buffer_init(&state.incoming); - grpc_closure_init(&state.read_cb, read_cb, &state); + grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx); grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb); @@ -245,7 +245,7 @@ static void large_read_test(size_t slice_size) { state.read_bytes = 0; state.target_read_bytes = (size_t)written_bytes; grpc_slice_buffer_init(&state.incoming); - grpc_closure_init(&state.read_cb, read_cb, &state); + grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx); grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb); @@ -384,7 +384,8 @@ static void write_test(size_t num_bytes, size_t slice_size) { grpc_slice_buffer_init(&outgoing); grpc_slice_buffer_addn(&outgoing, slices, num_blocks); - grpc_closure_init(&write_done_closure, write_done, &state); + grpc_closure_init(&write_done_closure, write_done, &state, + grpc_schedule_on_exec_ctx); grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure); drain_socket_blocking(sv[0], num_bytes, num_bytes); @@ -429,7 +430,8 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_closure fd_released_cb; int 
fd_released_done = 0; - grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done); + grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done, + grpc_schedule_on_exec_ctx); gpr_log(GPR_INFO, "Release fd read_test of size %" PRIuPTR ", slice size %" PRIuPTR, @@ -452,7 +454,7 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) { state.read_bytes = 0; state.target_read_bytes = written_bytes; grpc_slice_buffer_init(&state.incoming); - grpc_closure_init(&state.read_cb, read_cb, &state); + grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx); grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb); @@ -561,7 +563,8 @@ int main(int argc, char **argv) { grpc_pollset_init(g_pollset, &g_mu); grpc_endpoint_tests(configs[0], g_pollset, g_mu); run_tests(); - grpc_closure_init(&destroyed, destroy_pollset, g_pollset); + grpc_closure_init(&destroyed, destroy_pollset, g_pollset, + grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed); grpc_exec_ctx_finish(&exec_ctx); grpc_shutdown(); diff --git a/test/core/iomgr/tcp_server_posix_test.c b/test/core/iomgr/tcp_server_posix_test.c index 1b8a39c1be..020f005980 100644 --- a/test/core/iomgr/tcp_server_posix_test.c +++ b/test/core/iomgr/tcp_server_posix_test.c @@ -104,7 +104,7 @@ static void server_weak_ref_shutdown(grpc_exec_ctx *exec_ctx, void *arg, static void server_weak_ref_init(server_weak_ref *weak_ref) { weak_ref->server = NULL; grpc_closure_init(&weak_ref->server_shutdown, server_weak_ref_shutdown, - weak_ref); + weak_ref, grpc_schedule_on_exec_ctx); } /* Make weak_ref->server_shutdown a shutdown_starting cb on server. @@ -126,6 +126,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, on_connect_result temp_result; on_connect_result_set(&temp_result, acceptor); + gpr_free(acceptor); gpr_mu_lock(g_mu); g_result = temp_result; @@ -365,7 +366,8 @@ int main(int argc, char **argv) { test_connect(1); test_connect(10); - grpc_closure_init(&destroyed, destroy_pollset, g_pollset); + grpc_closure_init(&destroyed, destroy_pollset, g_pollset, + grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed); grpc_exec_ctx_finish(&exec_ctx); grpc_shutdown(); diff --git a/test/core/iomgr/udp_server_test.c b/test/core/iomgr/udp_server_test.c index 9bea229466..0a247caf89 100644 --- a/test/core/iomgr/udp_server_test.c +++ b/test/core/iomgr/udp_server_test.c @@ -234,7 +234,8 @@ int main(int argc, char **argv) { test_receive(1); test_receive(10); - grpc_closure_init(&destroyed, destroy_pollset, g_pollset); + grpc_closure_init(&destroyed, destroy_pollset, g_pollset, + grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed); grpc_exec_ctx_finish(&exec_ctx); gpr_free(g_pollset); diff --git a/test/core/security/create_jwt.c b/test/core/security/create_jwt.c index 741ace9bdd..ac795f29d2 100644 --- a/test/core/security/create_jwt.c +++ b/test/core/security/create_jwt.c @@ -72,6 +72,7 @@ int main(int argc, char **argv) { char *scope = NULL; char *json_key_file_path = NULL; char *service_url = NULL; + grpc_init(); gpr_cmdline *cl = gpr_cmdline_create("create_jwt"); gpr_cmdline_add_string(cl, "json_key", "File path of the json key.", &json_key_file_path); @@ -102,5 +103,6 @@ int main(int argc, char **argv) { create_jwt(json_key_file_path, service_url, scope); gpr_cmdline_destroy(cl); + grpc_shutdown(); return 0; } diff --git a/test/core/security/credentials_test.c 
b/test/core/security/credentials_test.c index d4c755088d..d624a38438 100644 --- a/test/core/security/credentials_test.c +++ b/test/core/security/credentials_test.c @@ -565,7 +565,7 @@ static int compute_engine_httpcli_get_success_override( grpc_httpcli_response *response) { validate_compute_engine_http_request(request); *response = http_response(200, valid_oauth2_json_response); - grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE); return 1; } @@ -575,7 +575,7 @@ static int compute_engine_httpcli_get_failure_override( grpc_httpcli_response *response) { validate_compute_engine_http_request(request); *response = http_response(403, "Not Authorized."); - grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE); return 1; } @@ -668,7 +668,7 @@ static int refresh_token_httpcli_post_success( grpc_closure *on_done, grpc_httpcli_response *response) { validate_refresh_token_http_request(request, body, body_size); *response = http_response(200, valid_oauth2_json_response); - grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE); return 1; } @@ -678,7 +678,7 @@ static int refresh_token_httpcli_post_failure( grpc_closure *on_done, grpc_httpcli_response *response) { validate_refresh_token_http_request(request, body, body_size); *response = http_response(403, "Not Authorized."); - grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE); return 1; } @@ -917,7 +917,7 @@ static int default_creds_gce_detection_httpcli_get_success_override( response->hdrs = headers; GPR_ASSERT(strcmp(request->http.path, "/") == 0); GPR_ASSERT(strcmp(request->host, "metadata.google.internal") == 0); - grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE); return 1; } @@ -975,7 +975,7 @@ static int default_creds_gce_detection_httpcli_get_failure_override( GPR_ASSERT(strcmp(request->http.path, "/") == 0); GPR_ASSERT(strcmp(request->host, "metadata.google.internal") == 0); *response = http_response(200, ""); - grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE); return 1; } diff --git a/test/core/security/jwt_verifier_test.c b/test/core/security/jwt_verifier_test.c index f8afba8d6d..a4d65dccd9 100644 --- a/test/core/security/jwt_verifier_test.c +++ b/test/core/security/jwt_verifier_test.c @@ -166,6 +166,13 @@ static const char claims_without_time_constraint[] = " \"jti\": \"jwtuniqueid\"," " \"foo\": \"bar\"}"; +static const char claims_with_bad_subject[] = + "{ \"aud\": \"https://foo.com\"," + " \"iss\": \"evil@blah.foo.com\"," + " \"sub\": \"juju@blah.foo.com\"," + " \"jti\": \"jwtuniqueid\"," + " \"foo\": \"bar\"}"; + static const char invalid_claims[] = "{ \"aud\": \"https://foo.com\"," " \"iss\": 46," /* Issuer cannot be a number. 
*/ @@ -179,6 +186,38 @@ typedef struct { const char *expected_subject; } verifier_test_config; +static void test_jwt_issuer_email_domain(void) { + const char *d = grpc_jwt_issuer_email_domain("https://foo.com"); + GPR_ASSERT(d == NULL); + d = grpc_jwt_issuer_email_domain("foo.com"); + GPR_ASSERT(d == NULL); + d = grpc_jwt_issuer_email_domain(""); + GPR_ASSERT(d == NULL); + d = grpc_jwt_issuer_email_domain("@"); + GPR_ASSERT(d == NULL); + d = grpc_jwt_issuer_email_domain("bar@foo"); + GPR_ASSERT(strcmp(d, "foo") == 0); + d = grpc_jwt_issuer_email_domain("bar@foo.com"); + GPR_ASSERT(strcmp(d, "foo.com") == 0); + d = grpc_jwt_issuer_email_domain("bar@blah.foo.com"); + GPR_ASSERT(strcmp(d, "foo.com") == 0); + d = grpc_jwt_issuer_email_domain("bar.blah@blah.foo.com"); + GPR_ASSERT(strcmp(d, "foo.com") == 0); + d = grpc_jwt_issuer_email_domain("bar.blah@baz.blah.foo.com"); + GPR_ASSERT(strcmp(d, "foo.com") == 0); + + /* This is not a very good parser but make sure we do not crash on these weird + inputs. */ + d = grpc_jwt_issuer_email_domain("@foo"); + GPR_ASSERT(strcmp(d, "foo") == 0); + d = grpc_jwt_issuer_email_domain("bar@."); + GPR_ASSERT(d != NULL); + d = grpc_jwt_issuer_email_domain("bar@.."); + GPR_ASSERT(d != NULL); + d = grpc_jwt_issuer_email_domain("bar@..."); + GPR_ASSERT(d != NULL); +} + static void test_claims_success(void) { grpc_jwt_claims *claims; grpc_slice s = grpc_slice_from_copied_string(claims_without_time_constraint); @@ -242,6 +281,19 @@ static void test_bad_audience_claims_failure(void) { grpc_jwt_claims_destroy(claims); } +static void test_bad_subject_claims_failure(void) { + grpc_jwt_claims *claims; + grpc_slice s = grpc_slice_from_copied_string(claims_with_bad_subject); + grpc_json *json = grpc_json_parse_string_with_len( + (char *)GRPC_SLICE_START_PTR(s), GRPC_SLICE_LENGTH(s)); + GPR_ASSERT(json != NULL); + claims = grpc_jwt_claims_from_json(json, s); + GPR_ASSERT(claims != NULL); + GPR_ASSERT(grpc_jwt_claims_check(claims, "https://foo.com") == + GRPC_JWT_VERIFIER_BAD_SUBJECT); + grpc_jwt_claims_destroy(claims); +} + static char *json_key_str(const char *last_part) { size_t result_len = strlen(json_key_str_part1) + strlen(json_key_str_part2) + strlen(last_part); @@ -294,7 +346,7 @@ static int httpcli_get_google_keys_for_email( "/robot/v1/metadata/x509/" "777-abaslkan11hlb6nmim3bpspl31ud@developer." 
"gserviceaccount.com") == 0); - grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE); return 1; } @@ -338,7 +390,7 @@ static int httpcli_get_custom_keys_for_email( GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl); GPR_ASSERT(strcmp(request->host, "keys.bar.com") == 0); GPR_ASSERT(strcmp(request->http.path, "/jwk/foo@bar.com") == 0); - grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE); return 1; } @@ -372,7 +424,7 @@ static int httpcli_get_jwk_set(grpc_exec_ctx *exec_ctx, GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl); GPR_ASSERT(strcmp(request->host, "www.googleapis.com") == 0); GPR_ASSERT(strcmp(request->http.path, "/oauth2/v3/certs") == 0); - grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE); return 1; } @@ -387,7 +439,7 @@ static int httpcli_get_openid_config(grpc_exec_ctx *exec_ctx, GPR_ASSERT(strcmp(request->http.path, GRPC_OPENID_CONFIG_URL_SUFFIX) == 0); grpc_httpcli_set_override(httpcli_get_jwk_set, httpcli_post_should_not_be_called); - grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE); return 1; } @@ -427,7 +479,7 @@ static int httpcli_get_bad_json(grpc_exec_ctx *exec_ctx, grpc_httpcli_response *response) { *response = http_response(200, gpr_strdup("{\"bad\": \"stuff\"}")); GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl); - grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE); return 1; } @@ -563,10 +615,12 @@ static void test_jwt_verifier_bad_format(void) { int main(int argc, char **argv) { grpc_test_init(argc, argv); grpc_init(); + test_jwt_issuer_email_domain(); test_claims_success(); test_expired_claims_failure(); test_invalid_claims_failure(); test_bad_audience_claims_failure(); + test_bad_subject_claims_failure(); test_jwt_verifier_google_email_issuer_success(); test_jwt_verifier_custom_email_issuer_success(); test_jwt_verifier_url_issuer_success(); diff --git a/test/core/security/oauth2_utils.c b/test/core/security/oauth2_utils.c index 44a209258d..ff77af908a 100644 --- a/test/core/security/oauth2_utils.c +++ b/test/core/security/oauth2_utils.c @@ -92,7 +92,8 @@ char *grpc_test_fetch_oauth2_token_with_credentials( request.pops = grpc_polling_entity_create_from_pollset(pollset); request.is_done = 0; - grpc_closure_init(&do_nothing_closure, do_nothing, NULL); + grpc_closure_init(&do_nothing_closure, do_nothing, NULL, + grpc_schedule_on_exec_ctx); grpc_call_credentials_get_request_metadata( &exec_ctx, creds, &request.pops, null_ctx, on_oauth2_response, &request); diff --git a/test/core/security/secure_endpoint_test.c b/test/core/security/secure_endpoint_test.c index b5d95004fe..cbf8a171af 100644 --- a/test/core/security/secure_endpoint_test.c +++ b/test/core/security/secure_endpoint_test.c @@ -158,7 +158,7 @@ static void test_leftover(grpc_endpoint_test_config config, size_t slice_size) { gpr_log(GPR_INFO, "Start test left over"); grpc_slice_buffer_init(&incoming); - grpc_closure_init(&done_closure, inc_call_ctr, &n); + grpc_closure_init(&done_closure, inc_call_ctr, &n, grpc_schedule_on_exec_ctx); grpc_endpoint_read(&exec_ctx, f.client_ep, &incoming, &done_closure); grpc_exec_ctx_finish(&exec_ctx); GPR_ASSERT(n == 1); @@ -191,7 +191,8 @@ int main(int argc, char **argv) { grpc_pollset_init(g_pollset, &g_mu); 
grpc_endpoint_tests(configs[0], g_pollset, g_mu); test_leftover(configs[1], 1); - grpc_closure_init(&destroyed, destroy_pollset, g_pollset); + grpc_closure_init(&destroyed, destroy_pollset, g_pollset, + grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed); grpc_exec_ctx_finish(&exec_ctx); grpc_shutdown(); diff --git a/test/core/security/ssl_server_fuzzer.c b/test/core/security/ssl_server_fuzzer.c index ca629a6eba..8673225fef 100644 --- a/test/core/security/ssl_server_fuzzer.c +++ b/test/core/security/ssl_server_fuzzer.c @@ -111,8 +111,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { struct handshake_state state; state.done_callback_called = false; grpc_handshake_manager *handshake_mgr = grpc_handshake_manager_create(); - grpc_server_security_connector_create_handshakers(&exec_ctx, sc, - handshake_mgr); + grpc_server_security_connector_add_handshakers(&exec_ctx, sc, handshake_mgr); grpc_handshake_manager_do_handshake( &exec_ctx, handshake_mgr, mock_endpoint, NULL /* channel_args */, deadline, NULL /* acceptor */, on_handshake_done, &state); diff --git a/test/core/security/verify_jwt.c b/test/core/security/verify_jwt.c index 043d29e6bb..ccc85c9f32 100644 --- a/test/core/security/verify_jwt.c +++ b/test/core/security/verify_jwt.c @@ -93,6 +93,7 @@ int main(int argc, char **argv) { char *aud = NULL; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_init(); cl = gpr_cmdline_create("JWT verifier tool"); gpr_cmdline_add_string(cl, "jwt", "JSON web token to verify", &jwt); gpr_cmdline_add_string(cl, "aud", "Audience for the JWT", &aud); @@ -131,5 +132,6 @@ int main(int argc, char **argv) { grpc_jwt_verifier_destroy(verifier); gpr_cmdline_destroy(cl); + grpc_shutdown(); return !sync.success; } diff --git a/test/core/support/string_test.c b/test/core/support/string_test.c index 78b77fad8e..af232db350 100644 --- a/test/core/support/string_test.c +++ b/test/core/support/string_test.c @@ -243,6 +243,8 @@ static void test_int64toa() { static void test_leftpad() { char *padded; + LOG_TEST_NAME("test_leftpad"); + padded = gpr_leftpad("foo", ' ', 5); GPR_ASSERT(0 == strcmp(" foo", padded)); gpr_free(padded); @@ -273,12 +275,25 @@ static void test_leftpad() { } static void test_stricmp(void) { + LOG_TEST_NAME("test_stricmp"); + GPR_ASSERT(0 == gpr_stricmp("hello", "hello")); GPR_ASSERT(0 == gpr_stricmp("HELLO", "hello")); GPR_ASSERT(gpr_stricmp("a", "b") < 0); GPR_ASSERT(gpr_stricmp("b", "a") > 0); } +static void test_memrchr(void) { + LOG_TEST_NAME("test_memrchr"); + + GPR_ASSERT(NULL == gpr_memrchr(NULL, 'a', 0)); + GPR_ASSERT(NULL == gpr_memrchr("", 'a', 0)); + GPR_ASSERT(NULL == gpr_memrchr("hello", 'b', 5)); + GPR_ASSERT(0 == strcmp((const char *)gpr_memrchr("hello", 'h', 5), "hello")); + GPR_ASSERT(0 == strcmp((const char *)gpr_memrchr("hello", 'o', 5), "o")); + GPR_ASSERT(0 == strcmp((const char *)gpr_memrchr("hello", 'l', 5), "lo")); +} + int main(int argc, char **argv) { grpc_test_init(argc, argv); test_strdup(); @@ -291,5 +306,6 @@ int main(int argc, char **argv) { test_int64toa(); test_leftpad(); test_stricmp(); + test_memrchr(); return 0; } diff --git a/test/core/surface/channel_create_test.c b/test/core/surface/channel_create_test.c index ad7970aab9..654e5324d9 100644 --- a/test/core/surface/channel_create_test.c +++ b/test/core/surface/channel_create_test.c @@ -31,9 +31,14 @@ * */ +#include <string.h> + #include <grpc/grpc.h> #include <grpc/support/log.h> + #include "src/core/ext/client_channel/resolver_registry.h" +#include 
"src/core/lib/channel/channel_stack.h" +#include "src/core/lib/surface/channel.h" #include "test/core/util/test_config.h" void test_unknown_scheme_target(void) { @@ -44,6 +49,13 @@ void test_unknown_scheme_target(void) { chan = grpc_insecure_channel_create("blah://blah", NULL, NULL); GPR_ASSERT(chan != NULL); + + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_channel_element *elem = + grpc_channel_stack_element(grpc_channel_get_channel_stack(chan), 0); + GPR_ASSERT(0 == strcmp(elem->filter->name, "lame-client")); + grpc_exec_ctx_finish(&exec_ctx); + grpc_channel_destroy(chan); } diff --git a/test/core/surface/concurrent_connectivity_test.c b/test/core/surface/concurrent_connectivity_test.c index f9f4675454..8ebe8d07e4 100644 --- a/test/core/surface/concurrent_connectivity_test.c +++ b/test/core/surface/concurrent_connectivity_test.c @@ -105,8 +105,8 @@ void server_thread(void *vargs) { static void on_connect(grpc_exec_ctx *exec_ctx, void *vargs, grpc_endpoint *tcp, grpc_pollset *accepting_pollset, grpc_tcp_server_acceptor *acceptor) { + gpr_free(acceptor); struct server_thread_args *args = (struct server_thread_args *)vargs; - (void)acceptor; grpc_endpoint_shutdown(exec_ctx, tcp); grpc_endpoint_destroy(exec_ctx, tcp); GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(args->pollset, NULL)); @@ -229,9 +229,9 @@ int main(int argc, char **argv) { gpr_atm_rel_store(&args.stop, 1); gpr_thd_join(server); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_pollset_shutdown( - &exec_ctx, args.pollset, - grpc_closure_create(done_pollset_shutdown, args.pollset)); + grpc_pollset_shutdown(&exec_ctx, args.pollset, + grpc_closure_create(done_pollset_shutdown, args.pollset, + grpc_schedule_on_exec_ctx)); grpc_exec_ctx_finish(&exec_ctx); grpc_shutdown(); diff --git a/test/core/surface/lame_client_test.c b/test/core/surface/lame_client_test.c index 6afcefca92..b6db6a6b08 100644 --- a/test/core/surface/lame_client_test.c +++ b/test/core/surface/lame_client_test.c @@ -62,7 +62,8 @@ void test_transport_op(grpc_channel *channel) { grpc_connectivity_state state = GRPC_CHANNEL_IDLE; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_closure_init(&transport_op_cb, verify_connectivity, &state); + grpc_closure_init(&transport_op_cb, verify_connectivity, &state, + grpc_schedule_on_exec_ctx); op = grpc_make_transport_op(NULL); op->on_connectivity_state_change = &transport_op_cb; @@ -71,7 +72,8 @@ void test_transport_op(grpc_channel *channel) { elem->filter->start_transport_op(&exec_ctx, elem, op); grpc_exec_ctx_finish(&exec_ctx); - grpc_closure_init(&transport_op_cb, do_nothing, NULL); + grpc_closure_init(&transport_op_cb, do_nothing, NULL, + grpc_schedule_on_exec_ctx); op = grpc_make_transport_op(&transport_op_cb); elem->filter->start_transport_op(&exec_ctx, elem, op); grpc_exec_ctx_finish(&exec_ctx); diff --git a/test/core/transport/connectivity_state_test.c b/test/core/transport/connectivity_state_test.c index 1050059eff..3520ef0a80 100644 --- a/test/core/transport/connectivity_state_test.c +++ b/test/core/transport/connectivity_state_test.c @@ -86,7 +86,8 @@ static void test_check(void) { static void test_subscribe_then_unsubscribe(void) { grpc_connectivity_state_tracker tracker; - grpc_closure *closure = grpc_closure_create(must_fail, THE_ARG); + grpc_closure *closure = + grpc_closure_create(must_fail, THE_ARG, grpc_schedule_on_exec_ctx); grpc_connectivity_state state = GRPC_CHANNEL_IDLE; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_DEBUG, "test_subscribe_then_unsubscribe"); @@ -109,7 +110,8 
@@ static void test_subscribe_then_unsubscribe(void) { static void test_subscribe_then_destroy(void) { grpc_connectivity_state_tracker tracker; - grpc_closure *closure = grpc_closure_create(must_succeed, THE_ARG); + grpc_closure *closure = + grpc_closure_create(must_succeed, THE_ARG, grpc_schedule_on_exec_ctx); grpc_connectivity_state state = GRPC_CHANNEL_IDLE; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_DEBUG, "test_subscribe_then_destroy"); @@ -128,7 +130,8 @@ static void test_subscribe_then_destroy(void) { static void test_subscribe_with_failure_then_destroy(void) { grpc_connectivity_state_tracker tracker; - grpc_closure *closure = grpc_closure_create(must_fail, THE_ARG); + grpc_closure *closure = + grpc_closure_create(must_fail, THE_ARG, grpc_schedule_on_exec_ctx); grpc_connectivity_state state = GRPC_CHANNEL_SHUTDOWN; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_DEBUG, "test_subscribe_with_failure_then_destroy"); diff --git a/test/core/util/mock_endpoint.c b/test/core/util/mock_endpoint.c index bf6d85252a..04793bceab 100644 --- a/test/core/util/mock_endpoint.c +++ b/test/core/util/mock_endpoint.c @@ -55,7 +55,7 @@ static void me_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, gpr_mu_lock(&m->mu); if (m->read_buffer.count > 0) { grpc_slice_buffer_swap(&m->read_buffer, slices); - grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE); } else { m->on_read = cb; m->on_read_out = slices; @@ -69,7 +69,7 @@ static void me_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, for (size_t i = 0; i < slices->count; i++) { m->on_write(slices->slices[i]); } - grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE); } static void me_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, @@ -82,8 +82,8 @@ static void me_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { grpc_mock_endpoint *m = (grpc_mock_endpoint *)ep; gpr_mu_lock(&m->mu); if (m->on_read) { - grpc_exec_ctx_sched(exec_ctx, m->on_read, - GRPC_ERROR_CREATE("Endpoint Shutdown"), NULL); + grpc_closure_sched(exec_ctx, m->on_read, + GRPC_ERROR_CREATE("Endpoint Shutdown")); m->on_read = NULL; } gpr_mu_unlock(&m->mu); @@ -144,7 +144,7 @@ void grpc_mock_endpoint_put_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, gpr_mu_lock(&m->mu); if (m->on_read != NULL) { grpc_slice_buffer_add(m->on_read_out, slice); - grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, m->on_read, GRPC_ERROR_NONE); m->on_read = NULL; } else { grpc_slice_buffer_add(&m->read_buffer, slice); diff --git a/test/core/util/passthru_endpoint.c b/test/core/util/passthru_endpoint.c index b3405f02e9..15ba092c5b 100644 --- a/test/core/util/passthru_endpoint.c +++ b/test/core/util/passthru_endpoint.c @@ -63,11 +63,10 @@ static void me_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, half *m = (half *)ep; gpr_mu_lock(&m->parent->mu); if (m->parent->shutdown) { - grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_CREATE("Already shutdown"), - NULL); + grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_CREATE("Already shutdown")); } else if (m->read_buffer.count > 0) { grpc_slice_buffer_swap(&m->read_buffer, slices); - grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE); } else { m->on_read = cb; m->on_read_out = slices; @@ -91,7 +90,7 @@ static void me_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, for (size_t i = 0; i < slices->count; i++) { 
grpc_slice_buffer_add(m->on_read_out, grpc_slice_ref(slices->slices[i])); } - grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_NONE, NULL); + grpc_closure_sched(exec_ctx, m->on_read, GRPC_ERROR_NONE); m->on_read = NULL; } else { for (size_t i = 0; i < slices->count; i++) { @@ -99,7 +98,7 @@ static void me_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, } } gpr_mu_unlock(&m->parent->mu); - grpc_exec_ctx_sched(exec_ctx, cb, error, NULL); + grpc_closure_sched(exec_ctx, cb, error); } static void me_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, @@ -113,14 +112,12 @@ static void me_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { gpr_mu_lock(&m->parent->mu); m->parent->shutdown = true; if (m->on_read) { - grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown"), - NULL); + grpc_closure_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown")); m->on_read = NULL; } m = other_half(m); if (m->on_read) { - grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown"), - NULL); + grpc_closure_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown")); m->on_read = NULL; } gpr_mu_unlock(&m->parent->mu); diff --git a/test/core/util/port_server_client.c b/test/core/util/port_server_client.c index b2342feeb4..0bde726ba1 100644 --- a/test/core/util/port_server_client.c +++ b/test/core/util/port_server_client.c @@ -92,7 +92,8 @@ void grpc_free_port_using_server(char *server, int port) { grpc_pollset *pollset = gpr_malloc(grpc_pollset_size()); grpc_pollset_init(pollset, &pr.mu); pr.pops = grpc_polling_entity_create_from_pollset(pollset); - shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops); + shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops, + grpc_schedule_on_exec_ctx); req.host = server; gpr_asprintf(&path, "/drop/%d", port); @@ -103,7 +104,9 @@ void grpc_free_port_using_server(char *server, int port) { grpc_resource_quota_create("port_server_client/free"); grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), - grpc_closure_create(freed_port_from_server, &pr), &rsp); + grpc_closure_create(freed_port_from_server, &pr, + grpc_schedule_on_exec_ctx), + &rsp); grpc_resource_quota_internal_unref(&exec_ctx, resource_quota); gpr_mu_lock(pr.mu); while (!pr.done) { @@ -174,7 +177,8 @@ static void got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg, grpc_resource_quota_create("port_server_client/pick_retry"); grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pops, resource_quota, &req, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), - grpc_closure_create(got_port_from_server, pr), + grpc_closure_create(got_port_from_server, pr, + grpc_schedule_on_exec_ctx), &pr->response); grpc_resource_quota_internal_unref(exec_ctx, resource_quota); return; @@ -208,7 +212,8 @@ int grpc_pick_port_using_server(char *server) { grpc_pollset *pollset = gpr_malloc(grpc_pollset_size()); grpc_pollset_init(pollset, &pr.mu); pr.pops = grpc_polling_entity_create_from_pollset(pollset); - shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops); + shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops, + grpc_schedule_on_exec_ctx); pr.port = -1; pr.server = server; pr.ctx = &context; @@ -219,10 +224,11 @@ int grpc_pick_port_using_server(char *server) { grpc_httpcli_context_init(&context); grpc_resource_quota *resource_quota = grpc_resource_quota_create("port_server_client/pick"); - grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req, - 
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), - grpc_closure_create(got_port_from_server, &pr), - &pr.response); + grpc_httpcli_get( + &exec_ctx, &context, &pr.pops, resource_quota, &req, + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), + grpc_closure_create(got_port_from_server, &pr, grpc_schedule_on_exec_ctx), + &pr.response); grpc_resource_quota_internal_unref(&exec_ctx, resource_quota); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(pr.mu); diff --git a/test/core/util/reconnect_server.c b/test/core/util/reconnect_server.c index 6509cc5b68..7bf83a74a1 100644 --- a/test/core/util/reconnect_server.c +++ b/test/core/util/reconnect_server.c @@ -73,6 +73,7 @@ static void pretty_print_backoffs(reconnect_server *server) { static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, grpc_pollset *accepting_pollset, grpc_tcp_server_acceptor *acceptor) { + gpr_free(acceptor); char *peer; char *last_colon; reconnect_server *server = (reconnect_server *)arg; diff --git a/test/core/util/test_tcp_server.c b/test/core/util/test_tcp_server.c index 16df91d968..2338b81ab1 100644 --- a/test/core/util/test_tcp_server.c +++ b/test/core/util/test_tcp_server.c @@ -57,7 +57,8 @@ void test_tcp_server_init(test_tcp_server *server, grpc_tcp_server_cb on_connect, void *user_data) { grpc_init(); server->tcp_server = NULL; - grpc_closure_init(&server->shutdown_complete, on_server_destroyed, server); + grpc_closure_init(&server->shutdown_complete, on_server_destroyed, server, + grpc_schedule_on_exec_ctx); server->shutdown = 0; server->pollset = gpr_malloc(grpc_pollset_size()); grpc_pollset_init(server->pollset, &server->mu); @@ -111,7 +112,8 @@ void test_tcp_server_destroy(test_tcp_server *server) { gpr_timespec shutdown_deadline; grpc_closure do_nothing_cb; grpc_tcp_server_unref(&exec_ctx, server->tcp_server); - grpc_closure_init(&do_nothing_cb, do_nothing, NULL); + grpc_closure_init(&do_nothing_cb, do_nothing, NULL, + grpc_schedule_on_exec_ctx); shutdown_deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(5, GPR_TIMESPAN)); while (!server->shutdown && diff --git a/test/cpp/common/channel_filter_test.cc b/test/cpp/common/channel_filter_test.cc index 600a953d82..32246a4b76 100644 --- a/test/cpp/common/channel_filter_test.cc +++ b/test/cpp/common/channel_filter_test.cc @@ -41,14 +41,24 @@ namespace testing { class MyChannelData : public ChannelData { public: - MyChannelData(const grpc_channel_args& args, const char* peer) - : ChannelData(args, peer) {} + MyChannelData() {} + + grpc_error* Init(grpc_exec_ctx* exec_ctx, + grpc_channel_element_args* args) override { + (void)args->channel_args; // Make sure field is available. + return GRPC_ERROR_NONE; + } }; class MyCallData : public CallData { public: - explicit MyCallData(const ChannelData& channel_data) - : CallData(channel_data) {} + MyCallData() {} + + grpc_error* Init(grpc_exec_ctx* exec_ctx, ChannelData* channel_data, + grpc_call_element_args* args) override { + (void)args->path; // Make sure field is available. 
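Both C++ filter tests switch from ChannelData/CallData constructors that received their arguments directly to default-constructible classes whose Init() methods take the element args and return a grpc_error*. A sketch of the resulting shape, assuming the ChannelData and CallData base classes from the channel_filter API (the class names are illustrative):

    class CountingChannelData : public ChannelData {
     public:
      grpc_error* Init(grpc_exec_ctx* exec_ctx,
                       grpc_channel_element_args* args) override {
        // Per-channel setup moves here; channel args arrive via args->channel_args.
        return GRPC_ERROR_NONE;
      }
    };

    class CountingCallData : public CallData {
     public:
      grpc_error* Init(grpc_exec_ctx* exec_ctx, ChannelData* channel_data,
                       grpc_call_element_args* args) override {
        // Per-call setup moves here; call details such as args->path are available.
        return GRPC_ERROR_NONE;
      }
    };
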
+ return GRPC_ERROR_NONE; + } }; // This test ensures that when we make changes to the filter API in diff --git a/test/cpp/end2end/filter_end2end_test.cc b/test/cpp/end2end/filter_end2end_test.cc index ab6ed46de5..bd384f68b4 100644 --- a/test/cpp/end2end/filter_end2end_test.cc +++ b/test/cpp/end2end/filter_end2end_test.cc @@ -114,20 +114,17 @@ int GetCallCounterValue() { class ChannelDataImpl : public ChannelData { public: - ChannelDataImpl(const grpc_channel_args& args, const char* peer) - : ChannelData(args, peer) { + grpc_error* Init(grpc_exec_ctx* exec_ctx, grpc_channel_element_args* args) { IncrementConnectionCounter(); + return GRPC_ERROR_NONE; } }; class CallDataImpl : public CallData { public: - explicit CallDataImpl(const ChannelDataImpl& channel_data) - : CallData(channel_data) {} - void StartTransportStreamOp(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, TransportStreamOp* op) override { - // Incrementing the counter could be done from the ctor, but we want + // Incrementing the counter could be done from Init(), but we want // to test that the individual methods are actually called correctly. if (op->recv_initial_metadata() != nullptr) IncrementCallCounter(); grpc_call_next_op(exec_ctx, elem, op->op()); diff --git a/test/cpp/grpclb/grpclb_test.cc b/test/cpp/grpclb/grpclb_test.cc index fcdcaba6a2..de304b9f89 100644 --- a/test/cpp/grpclb/grpclb_test.cc +++ b/test/cpp/grpclb/grpclb_test.cc @@ -659,7 +659,7 @@ static test_fixture setup_test_fixture(int lb_server_update_delay_ms) { char *server_uri; // The grpclb LB policy will be automatically selected by virtue of // the fact that the returned addresses are balancer addresses. - gpr_asprintf(&server_uri, "test:%s?lb_enabled=1", + gpr_asprintf(&server_uri, "test:///%s?lb_enabled=1", tf.lb_server.servers_hostport); setup_client(server_uri, &tf.client); gpr_free(server_uri); diff --git a/test/cpp/interop/interop_test.cc b/test/cpp/interop/interop_test.cc index c066598d36..d4004740a4 100644 --- a/test/cpp/interop/interop_test.cc +++ b/test/cpp/interop/interop_test.cc @@ -126,7 +126,7 @@ int main(int argc, char** argv) { return 1; } /* wait a little */ - sleep(2); + sleep(10); /* start the clients */ ret = test_client(root, "127.0.0.1", port); if (ret != 0) return ret; diff --git a/test/cpp/interop/stress_test.cc b/test/cpp/interop/stress_test.cc index fc35db5233..97e658869f 100644 --- a/test/cpp/interop/stress_test.cc +++ b/test/cpp/interop/stress_test.cc @@ -371,9 +371,9 @@ int main(int argc, char** argv) { } // Start metrics server before waiting for the stress test threads + std::unique_ptr<grpc::Server> metrics_server; if (FLAGS_metrics_port > 0) { - std::unique_ptr<grpc::Server> metrics_server = - metrics_service.StartServer(FLAGS_metrics_port); + metrics_server = metrics_service.StartServer(FLAGS_metrics_port); } // Wait for the stress test threads to complete diff --git a/test/cpp/microbenchmarks/bm_fullstack.cc b/test/cpp/microbenchmarks/bm_fullstack.cc index 6cc780d44a..6c0bf80488 100644 --- a/test/cpp/microbenchmarks/bm_fullstack.cc +++ b/test/cpp/microbenchmarks/bm_fullstack.cc @@ -59,7 +59,7 @@ extern "C" { } #include "src/cpp/client/create_channel_internal.h" #include "src/proto/grpc/testing/echo.grpc.pb.h" -#include "third_party/google_benchmark/include/benchmark/benchmark.h" +#include "third_party/benchmark/include/benchmark/benchmark.h" namespace grpc { namespace testing { diff --git a/test/cpp/microbenchmarks/noop-benchmark.cc b/test/cpp/microbenchmarks/noop-benchmark.cc index 6b06c69c6e..99fa6d5f6e 100644 --- 
a/test/cpp/microbenchmarks/noop-benchmark.cc +++ b/test/cpp/microbenchmarks/noop-benchmark.cc @@ -31,10 +31,10 @@ * */ -/* This benchmark exists to ensure that the google_benchmark integration is +/* This benchmark exists to ensure that the benchmark integration is * working */ -#include "third_party/google_benchmark/include/benchmark/benchmark.h" +#include "third_party/benchmark/include/benchmark/benchmark.h" static void BM_NoOp(benchmark::State& state) { while (state.KeepRunning()) { diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc index ea0b38e8ad..93ef32db77 100644 --- a/test/cpp/qps/driver.cc +++ b/test/cpp/qps/driver.cc @@ -44,6 +44,7 @@ #include <grpc/support/alloc.h> #include <grpc/support/host_port.h> #include <grpc/support/log.h> +#include <grpc/support/string_util.h> #include "src/core/lib/profiling/timers.h" #include "src/core/lib/support/env.h" @@ -99,23 +100,36 @@ static std::unordered_map<string, std::deque<int>> get_hosts_and_cores( return hosts; } -static deque<string> get_workers(const string& name) { - char* env = gpr_getenv(name.c_str()); - if (!env) return deque<string>(); - +static deque<string> get_workers(const string& env_name) { + char* env = gpr_getenv(env_name.c_str()); + if (!env) { + env = gpr_strdup(""); + } deque<string> out; char* p = env; - for (;;) { - char* comma = strchr(p, ','); - if (comma) { - out.emplace_back(p, comma); - p = comma + 1; - } else { - out.emplace_back(p); - gpr_free(env); - return out; + if (strlen(env) != 0) { + for (;;) { + char* comma = strchr(p, ','); + if (comma) { + out.emplace_back(p, comma); + p = comma + 1; + } else { + out.emplace_back(p); + break; + } } } + if (out.size() == 0) { + gpr_log(GPR_ERROR, + "Environment variable \"%s\" does not contain a list of QPS " + "workers to use. Set it to a comma-separated list of " + "hostname:port pairs, starting with hosts that should act as " + "servers. E.g. 
export " + "%s=\"serverhost1:1234,clienthost1:1234,clienthost2:1234\"", + env_name.c_str(), env_name.c_str()); + } + gpr_free(env); + return out; } // helpers for postprocess_scenario_result @@ -195,7 +209,8 @@ static void postprocess_scenario_result(ScenarioResult* result) { std::unique_ptr<ScenarioResult> RunScenario( const ClientConfig& initial_client_config, size_t num_clients, const ServerConfig& initial_server_config, size_t num_servers, - int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count) { + int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count, + const char* qps_server_target_override, bool configure_core_lists) { // Log everything from the driver gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG); @@ -240,9 +255,7 @@ std::unique_ptr<ScenarioResult> RunScenario( workers.push_back(addr); } } - - // Setup the hosts and core counts - auto hosts_cores = get_hosts_and_cores(workers); + GPR_ASSERT(workers.size() != 0); // if num_clients is set to <=0, do dynamic sizing: all workers // except for servers are clients @@ -264,6 +277,11 @@ std::unique_ptr<ScenarioResult> RunScenario( unique_ptr<ClientReaderWriter<ServerArgs, ServerStatus>> stream; }; std::vector<ServerData> servers(num_servers); + std::unordered_map<string, std::deque<int>> hosts_cores; + + if (configure_core_lists) { + hosts_cores = get_hosts_and_cores(workers); + } for (size_t i = 0; i < num_servers; i++) { gpr_log(GPR_INFO, "Starting server on %s (worker #%" PRIuPTR ")", workers[i].c_str(), i); @@ -271,37 +289,36 @@ std::unique_ptr<ScenarioResult> RunScenario( CreateChannel(workers[i], InsecureChannelCredentials())); ServerConfig server_config = initial_server_config; - char* host; - char* driver_port; - char* cli_target; - gpr_split_host_port(workers[i].c_str(), &host, &driver_port); - string host_str(host); int server_core_limit = initial_server_config.core_limit(); int client_core_limit = initial_client_config.core_limit(); - if (server_core_limit == 0 && client_core_limit > 0) { - // In this case, limit the server cores if it matches the - // same host as one or more clients - const auto& dq = hosts_cores.at(host_str); - bool match = false; - int limit = dq.size(); - for (size_t cli = 0; cli < num_clients; cli++) { - if (host_str == get_host(workers[cli + num_servers])) { - limit -= client_core_limit; - match = true; + if (configure_core_lists) { + string host_str(get_host(workers[i])); + if (server_core_limit == 0 && client_core_limit > 0) { + // In this case, limit the server cores if it matches the + // same host as one or more clients + const auto& dq = hosts_cores.at(host_str); + bool match = false; + int limit = dq.size(); + for (size_t cli = 0; cli < num_clients; cli++) { + if (host_str == get_host(workers[cli + num_servers])) { + limit -= client_core_limit; + match = true; + } + } + if (match) { + GPR_ASSERT(limit > 0); + server_core_limit = limit; } } - if (match) { - GPR_ASSERT(limit > 0); - server_core_limit = limit; - } - } - if (server_core_limit > 0) { - auto& dq = hosts_cores.at(host_str); - GPR_ASSERT(dq.size() >= static_cast<size_t>(server_core_limit)); - for (int core = 0; core < server_core_limit; core++) { - server_config.add_core_list(dq.front()); - dq.pop_front(); + if (server_core_limit > 0) { + auto& dq = hosts_cores.at(host_str); + GPR_ASSERT(dq.size() >= static_cast<size_t>(server_core_limit)); + gpr_log(GPR_INFO, "Setting server core_list"); + for (int core = 0; core < server_core_limit; core++) { + server_config.add_core_list(dq.front()); + 
dq.pop_front(); + } } } @@ -315,11 +332,19 @@ std::unique_ptr<ScenarioResult> RunScenario( if (!servers[i].stream->Read(&init_status)) { gpr_log(GPR_ERROR, "Server %zu did not yield initial status", i); } - gpr_join_host_port(&cli_target, host, init_status.port()); - client_config.add_server_targets(cli_target); - gpr_free(host); - gpr_free(driver_port); - gpr_free(cli_target); + if (qps_server_target_override != NULL && + strlen(qps_server_target_override) > 0) { + // overriding the qps server target only works if there is 1 server + GPR_ASSERT(num_servers == 1); + client_config.add_server_targets(qps_server_target_override); + } else { + std::string host; + char* cli_target; + host = get_host(workers[i]); + gpr_join_host_port(&cli_target, host.c_str(), init_status.port()); + client_config.add_server_targets(cli_target); + gpr_free(cli_target); + } } // Targets are all set by now @@ -341,7 +366,8 @@ std::unique_ptr<ScenarioResult> RunScenario( int server_core_limit = initial_server_config.core_limit(); int client_core_limit = initial_client_config.core_limit(); - if ((server_core_limit > 0) || (client_core_limit > 0)) { + if (configure_core_lists && + ((server_core_limit > 0) || (client_core_limit > 0))) { auto& dq = hosts_cores.at(get_host(worker)); if (client_core_limit == 0) { // limit client cores if it matches a server host @@ -359,6 +385,7 @@ std::unique_ptr<ScenarioResult> RunScenario( } if (client_core_limit > 0) { GPR_ASSERT(dq.size() >= static_cast<size_t>(client_core_limit)); + gpr_log(GPR_INFO, "Setting client core_list"); for (int core = 0; core < client_core_limit; core++) { per_client_config.add_core_list(dq.front()); dq.pop_front(); @@ -548,6 +575,9 @@ bool RunQuit() { // Get client, server lists bool result = true; auto workers = get_workers("QPS_WORKERS"); + if (workers.size() == 0) { + return false; + } for (size_t i = 0; i < workers.size(); i++) { auto stub = WorkerService::NewStub( CreateChannel(workers[i], InsecureChannelCredentials())); diff --git a/test/cpp/qps/driver.h b/test/cpp/qps/driver.h index 93f4370caf..b5c8152e1b 100644 --- a/test/cpp/qps/driver.h +++ b/test/cpp/qps/driver.h @@ -45,7 +45,9 @@ namespace testing { std::unique_ptr<ScenarioResult> RunScenario( const grpc::testing::ClientConfig& client_config, size_t num_clients, const grpc::testing::ServerConfig& server_config, size_t num_servers, - int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count); + int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count, + const char* qps_server_target_override = "", + bool configure_core_lists = true); bool RunQuit(); } // namespace testing diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc index 31b5917fb7..da835b995a 100644 --- a/test/cpp/qps/qps_json_driver.cc +++ b/test/cpp/qps/qps_json_driver.cc @@ -67,17 +67,25 @@ DEFINE_double(error_tolerance, 0.01, "range is narrower than the error_tolerance computed range, we " "stop the search."); +DEFINE_string(qps_server_target_override, "", + "Override QPS server target to configure in client configs." + "Only applicable if there is a single benchmark server."); +DEFINE_bool(configure_core_lists, true, + "Provide 'core_list' parameters to workers. 
Value determined " + "by cores available and 'core_limit' parameters of the scenarios."); + namespace grpc { namespace testing { static std::unique_ptr<ScenarioResult> RunAndReport(const Scenario& scenario, bool* success) { std::cerr << "RUNNING SCENARIO: " << scenario.name() << "\n"; - auto result = - RunScenario(scenario.client_config(), scenario.num_clients(), - scenario.server_config(), scenario.num_servers(), - scenario.warmup_seconds(), scenario.benchmark_seconds(), - scenario.spawn_local_worker_count()); + auto result = RunScenario( + scenario.client_config(), scenario.num_clients(), + scenario.server_config(), scenario.num_servers(), + scenario.warmup_seconds(), scenario.benchmark_seconds(), + scenario.spawn_local_worker_count(), + FLAGS_qps_server_target_override.c_str(), FLAGS_configure_core_lists); // Amend the result with scenario config. Eventually we should adjust // RunScenario contract so we don't need to touch the result here. diff --git a/test/cpp/util/grpc_tool.cc b/test/cpp/util/grpc_tool.cc index 03c33abe9f..b9900ca1b7 100644 --- a/test/cpp/util/grpc_tool.cc +++ b/test/cpp/util/grpc_tool.cc @@ -86,11 +86,12 @@ class GrpcTool { // callback); // bool PrintTypeId(int argc, const char** argv, GrpcToolOutputCallback // callback); - // bool ParseMessage(int argc, const char** argv, GrpcToolOutputCallback - // callback); - // bool ToText(int argc, const char** argv, GrpcToolOutputCallback callback); - // bool ToBinary(int argc, const char** argv, GrpcToolOutputCallback - // callback); + bool ParseMessage(int argc, const char** argv, const CliCredentials& cred, + GrpcToolOutputCallback callback); + bool ToText(int argc, const char** argv, const CliCredentials& cred, + GrpcToolOutputCallback callback); + bool ToBinary(int argc, const char** argv, const CliCredentials& cred, + GrpcToolOutputCallback callback); void SetPrintCommandMode(int exit_status) { print_command_usage_ = true; @@ -173,9 +174,9 @@ const Command ops[] = { {"list", BindWith5Args(&GrpcTool::ListServices), 1, 3}, {"call", BindWith5Args(&GrpcTool::CallMethod), 2, 3}, {"type", BindWith5Args(&GrpcTool::PrintType), 2, 2}, - // {"parse", BindWith5Args(&GrpcTool::ParseMessage), 2, 3}, - // {"totext", BindWith5Args(&GrpcTool::ToText), 2, 3}, - // {"tobinary", BindWith5Args(&GrpcTool::ToBinary), 2, 3}, + {"parse", BindWith5Args(&GrpcTool::ParseMessage), 2, 3}, + {"totext", BindWith5Args(&GrpcTool::ToText), 2, 3}, + {"tobinary", BindWith5Args(&GrpcTool::ToBinary), 2, 3}, }; void Usage(const grpc::string& msg) { @@ -185,9 +186,9 @@ void Usage(const grpc::string& msg) { " grpc_cli ls ... ; List services\n" " grpc_cli call ... ; Call method\n" " grpc_cli type ... ; Print type\n" - // " grpc_cli parse ... ; Parse message\n" - // " grpc_cli totext ... ; Convert binary message to text\n" - // " grpc_cli tobinary ... ; Convert text message to binary\n" + " grpc_cli parse ... ; Parse message\n" + " grpc_cli totext ... ; Convert binary message to text\n" + " grpc_cli tobinary ... ; Convert text message to binary\n" " grpc_cli help ... 
; Print this message, or per-command usage\n" "\n", msg.c_str()); @@ -414,6 +415,7 @@ bool GrpcTool::CallMethod(int argc, const char** argv, grpc::string request_text; grpc::string server_address(argv[0]); grpc::string method_name(argv[1]); + grpc::string formatted_method_name; std::unique_ptr<grpc::testing::ProtoFileParser> parser; grpc::string serialized_request_proto; @@ -450,7 +452,9 @@ bool GrpcTool::CallMethod(int argc, const char** argv, if (FLAGS_binary_input) { serialized_request_proto = request_text; + formatted_method_name = method_name; } else { + formatted_method_name = parser->GetFormattedMethodName(method_name); serialized_request_proto = parser->GetSerializedProtoFromMethod( method_name, request_text, true /* is_request */); if (parser->HasError()) { @@ -466,9 +470,9 @@ bool GrpcTool::CallMethod(int argc, const char** argv, ParseMetadataFlag(&client_metadata); PrintMetadata(client_metadata, "Sending client initial metadata:"); grpc::Status status = grpc::testing::CliCall::Call( - channel, parser->GetFormatedMethodName(method_name), - serialized_request_proto, &serialized_response_proto, client_metadata, - &server_initial_metadata, &server_trailing_metadata); + channel, formatted_method_name, serialized_request_proto, + &serialized_response_proto, client_metadata, &server_initial_metadata, + &server_trailing_metadata); PrintMetadata(server_initial_metadata, "Received initial metadata from server:"); PrintMetadata(server_trailing_metadata, @@ -493,5 +497,122 @@ bool GrpcTool::CallMethod(int argc, const char** argv, return callback(output_ss.str()); } +bool GrpcTool::ParseMessage(int argc, const char** argv, + const CliCredentials& cred, + GrpcToolOutputCallback callback) { + CommandUsage( + "Parse message\n" + " grpc_cli parse <address> <type> [<message>]\n" + " <address> ; host:port\n" + " <type> ; Protocol buffer type name\n" + " <message> ; Text protobuffer (overrides --infile)\n" + " --protofiles ; Comma separated proto files used as a" + " fallback when parsing request/response\n" + " --proto_path ; The search path of proto files, valid" + " only when --protofiles is given\n" + " --infile ; Input filename (defaults to stdin)\n" + " --outfile ; Output filename (defaults to stdout)\n" + " --binary_input ; Input in binary format\n" + " --binary_output ; Output in binary format\n" + + cred.GetCredentialUsage()); + + std::stringstream output_ss; + grpc::string message_text; + grpc::string server_address(argv[0]); + grpc::string type_name(argv[1]); + std::unique_ptr<grpc::testing::ProtoFileParser> parser; + grpc::string serialized_request_proto; + + if (argc == 3) { + message_text = argv[2]; + if (!FLAGS_infile.empty()) { + fprintf(stderr, "warning: message given in argv, ignoring --infile.\n"); + } + } else { + std::stringstream input_stream; + if (FLAGS_infile.empty()) { + if (isatty(STDIN_FILENO)) { + fprintf(stderr, "reading request message from stdin...\n"); + } + input_stream << std::cin.rdbuf(); + } else { + std::ifstream input_file(FLAGS_infile, std::ios::in | std::ios::binary); + input_stream << input_file.rdbuf(); + input_file.close(); + } + message_text = input_stream.str(); + } + + if (!FLAGS_binary_input || !FLAGS_binary_output) { + std::shared_ptr<grpc::Channel> channel = + grpc::CreateChannel(server_address, cred.GetCredentials()); + parser.reset( + new grpc::testing::ProtoFileParser(FLAGS_remotedb ? 
channel : nullptr, + FLAGS_proto_path, FLAGS_protofiles)); + if (parser->HasError()) { + return false; + } + } + + if (FLAGS_binary_input) { + serialized_request_proto = message_text; + } else { + serialized_request_proto = + parser->GetSerializedProtoFromMessageType(type_name, message_text); + if (parser->HasError()) { + return false; + } + } + + if (FLAGS_binary_output) { + output_ss << serialized_request_proto; + } else { + grpc::string output_text = parser->GetTextFormatFromMessageType( + type_name, serialized_request_proto); + if (parser->HasError()) { + return false; + } + output_ss << output_text << std::endl; + } + + return callback(output_ss.str()); +} + +bool GrpcTool::ToText(int argc, const char** argv, const CliCredentials& cred, + GrpcToolOutputCallback callback) { + CommandUsage( + "Convert binary message to text\n" + " grpc_cli totext <protofiles> <type>\n" + " <protofiles> ; Comma separated list of proto files\n" + " <type> ; Protocol buffer type name\n" + " --proto_path ; The search path of proto files\n" + " --infile ; Input filename (defaults to stdin)\n" + " --outfile ; Output filename (defaults to stdout)\n"); + + FLAGS_protofiles = argv[0]; + FLAGS_remotedb = false; + FLAGS_binary_input = true; + FLAGS_binary_output = false; + return ParseMessage(argc, argv, cred, callback); +} + +bool GrpcTool::ToBinary(int argc, const char** argv, const CliCredentials& cred, + GrpcToolOutputCallback callback) { + CommandUsage( + "Convert text message to binary\n" + " grpc_cli tobinary <protofiles> <type> [<message>]\n" + " <protofiles> ; Comma separated list of proto files\n" + " <type> ; Protocol buffer type name\n" + " --proto_path ; The search path of proto files\n" + " --infile ; Input filename (defaults to stdin)\n" + " --outfile ; Output filename (defaults to stdout)\n"); + + FLAGS_protofiles = argv[0]; + FLAGS_remotedb = false; + FLAGS_binary_input = false; + FLAGS_binary_output = true; + return ParseMessage(argc, argv, cred, callback); +} + } // namespace testing } // namespace grpc diff --git a/test/cpp/util/grpc_tool_test.cc b/test/cpp/util/grpc_tool_test.cc index 1ff8172306..33ce611a60 100644 --- a/test/cpp/util/grpc_tool_test.cc +++ b/test/cpp/util/grpc_tool_test.cc @@ -86,9 +86,18 @@ using grpc::testing::EchoResponse; " rpc Echo(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ "{}\n" +#define ECHO_RESPONSE_MESSAGE \ + "message: \"echo\"\n" \ + "param {\n" \ + " host: \"localhost\"\n" \ + " peer: \"peer\"\n" \ + "}\n\n" + namespace grpc { namespace testing { +DECLARE_bool(binary_input); +DECLARE_bool(binary_output); DECLARE_bool(l); namespace { @@ -338,6 +347,47 @@ TEST_F(GrpcToolTest, CallCommand) { ShutdownServer(); } +TEST_F(GrpcToolTest, ParseCommand) { + // Test input "grpc_cli parse localhost:<port> grpc.testing.EchoResponse + // ECHO_RESPONSE_MESSAGE" + std::stringstream output_stream; + std::stringstream binary_output_stream; + + const grpc::string server_address = SetUpServer(); + const char* argv[] = {"grpc_cli", "parse", server_address.c_str(), + "grpc.testing.EchoResponse", ECHO_RESPONSE_MESSAGE}; + + FLAGS_binary_input = false; + FLAGS_binary_output = false; + EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); + // Expected output: ECHO_RESPONSE_MESSAGE + EXPECT_TRUE(0 == strcmp(output_stream.str().c_str(), ECHO_RESPONSE_MESSAGE)); + + // Parse text message to binary message and then parse it back to text message + 
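The new parse, totext, and tobinary commands all funnel into ParseMessage, which uses ProtoFileParser to convert between the text and binary encodings of a message type. A small sketch of that round trip with the same parser helpers; the proto path, file name, and message text are placeholders:

    // Text -> binary -> text, the same conversion the ParseCommand test drives
    // through the --binary_input/--binary_output flags.
    grpc::testing::ProtoFileParser parser(/*channel=*/nullptr, "path/to/protos",
                                          "echo.proto");
    grpc::string binary = parser.GetSerializedProtoFromMessageType(
        "grpc.testing.EchoResponse", "message: \"echo\"\n");
    grpc::string text =
        parser.GetTextFormatFromMessageType("grpc.testing.EchoResponse", binary);
    if (parser.HasError()) {
      // Handle a malformed message or unknown type.
    }
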
output_stream.str(grpc::string()); + output_stream.clear(); + FLAGS_binary_output = true; + EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); + grpc::string binary_data = output_stream.str(); + output_stream.str(grpc::string()); + output_stream.clear(); + argv[4] = binary_data.c_str(); + FLAGS_binary_input = true; + FLAGS_binary_output = false; + EXPECT_TRUE(0 == GrpcToolMainLib(5, argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); + + // Expected output: ECHO_RESPONSE_MESSAGE + EXPECT_TRUE(0 == strcmp(output_stream.str().c_str(), ECHO_RESPONSE_MESSAGE)); + + ShutdownServer(); +} + TEST_F(GrpcToolTest, TooFewArguments) { // Test input "grpc_cli call Echo" std::stringstream output_stream; diff --git a/test/cpp/util/proto_file_parser.cc b/test/cpp/util/proto_file_parser.cc index 3e524227e5..bc8a6083f4 100644 --- a/test/cpp/util/proto_file_parser.cc +++ b/test/cpp/util/proto_file_parser.cc @@ -172,19 +172,19 @@ grpc::string ProtoFileParser::GetFullMethodName(const grpc::string& method) { return method_descriptor->full_name(); } -grpc::string ProtoFileParser::GetFormatedMethodName( +grpc::string ProtoFileParser::GetFormattedMethodName( const grpc::string& method) { has_error_ = false; - grpc::string formated_method_name = GetFullMethodName(method); + grpc::string formatted_method_name = GetFullMethodName(method); if (has_error_) { return ""; } - size_t last_dot = formated_method_name.find_last_of('.'); + size_t last_dot = formatted_method_name.find_last_of('.'); if (last_dot != grpc::string::npos) { - formated_method_name[last_dot] = '/'; + formatted_method_name[last_dot] = '/'; } - formated_method_name.insert(formated_method_name.begin(), '/'); - return formated_method_name; + formatted_method_name.insert(formatted_method_name.begin(), '/'); + return formatted_method_name; } grpc::string ProtoFileParser::GetMessageTypeFromMethod( diff --git a/test/cpp/util/proto_file_parser.h b/test/cpp/util/proto_file_parser.h index eda3991e72..c1070a37b5 100644 --- a/test/cpp/util/proto_file_parser.h +++ b/test/cpp/util/proto_file_parser.h @@ -64,9 +64,9 @@ class ProtoFileParser { // descriptor database queries. grpc::string GetFullMethodName(const grpc::string& method); - // Formated method name is in the form of /Service/Method, it's good to be + // Formatted method name is in the form of /Service/Method, it's good to be // used as the argument of Stub::Call() - grpc::string GetFormatedMethodName(const grpc::string& method); + grpc::string GetFormattedMethodName(const grpc::string& method); grpc::string GetSerializedProtoFromMethod( const grpc::string& method, const grpc::string& text_format_proto, diff --git a/test/http2_test/http2_base_server.py b/test/http2_test/http2_base_server.py new file mode 100644 index 0000000000..ee7719b1a8 --- /dev/null +++ b/test/http2_test/http2_base_server.py @@ -0,0 +1,205 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging +import messages_pb2 +import struct + +import h2 +import h2.connection +import twisted +import twisted.internet +import twisted.internet.protocol + +_READ_CHUNK_SIZE = 16384 +_GRPC_HEADER_SIZE = 5 + +class H2ProtocolBaseServer(twisted.internet.protocol.Protocol): + def __init__(self): + self._conn = h2.connection.H2Connection(client_side=False) + self._recv_buffer = {} + self._handlers = {} + self._handlers['ConnectionMade'] = self.on_connection_made_default + self._handlers['DataReceived'] = self.on_data_received_default + self._handlers['WindowUpdated'] = self.on_window_update_default + self._handlers['RequestReceived'] = self.on_request_received_default + self._handlers['SendDone'] = self.on_send_done_default + self._handlers['ConnectionLost'] = self.on_connection_lost + self._handlers['PingAcknowledged'] = self.on_ping_acknowledged_default + self._stream_status = {} + self._send_remaining = {} + self._outstanding_pings = 0 + + def set_handlers(self, handlers): + self._handlers = handlers + + def connectionMade(self): + self._handlers['ConnectionMade']() + + def connectionLost(self, reason): + self._handlers['ConnectionLost'](reason) + + def on_connection_made_default(self): + logging.info('Connection Made') + self._conn.initiate_connection() + self.transport.setTcpNoDelay(True) + self.transport.write(self._conn.data_to_send()) + + def on_connection_lost(self, reason): + logging.info('Disconnected %s' % reason) + twisted.internet.reactor.callFromThread(twisted.internet.reactor.stop) + + def dataReceived(self, data): + try: + events = self._conn.receive_data(data) + except h2.exceptions.ProtocolError: + # this try/except block catches exceptions due to race between sending + # GOAWAY and processing a response in flight. 
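+ # Nothing useful can be done with this read: the GOAWAY already signalled + # shutdown, so the stray events from the peer are simply dropped.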
+ return + if self._conn.data_to_send: + self.transport.write(self._conn.data_to_send()) + for event in events: + if isinstance(event, h2.events.RequestReceived) and self._handlers.has_key('RequestReceived'): + logging.info('RequestReceived Event for stream: %d' % event.stream_id) + self._handlers['RequestReceived'](event) + elif isinstance(event, h2.events.DataReceived) and self._handlers.has_key('DataReceived'): + logging.info('DataReceived Event for stream: %d' % event.stream_id) + self._handlers['DataReceived'](event) + elif isinstance(event, h2.events.WindowUpdated) and self._handlers.has_key('WindowUpdated'): + logging.info('WindowUpdated Event for stream: %d' % event.stream_id) + self._handlers['WindowUpdated'](event) + elif isinstance(event, h2.events.PingAcknowledged) and self._handlers.has_key('PingAcknowledged'): + logging.info('PingAcknowledged Event') + self._handlers['PingAcknowledged'](event) + self.transport.write(self._conn.data_to_send()) + + def on_ping_acknowledged_default(self, event): + logging.info('ping acknowledged') + self._outstanding_pings -= 1 + + def on_data_received_default(self, event): + self._conn.acknowledge_received_data(len(event.data), event.stream_id) + self._recv_buffer[event.stream_id] += event.data + + def on_request_received_default(self, event): + self._recv_buffer[event.stream_id] = '' + self._stream_id = event.stream_id + self._stream_status[event.stream_id] = True + self._conn.send_headers( + stream_id=event.stream_id, + headers=[ + (':status', '200'), + ('content-type', 'application/grpc'), + ('grpc-encoding', 'identity'), + ('grpc-accept-encoding', 'identity,deflate,gzip'), + ], + ) + self.transport.write(self._conn.data_to_send()) + + def on_window_update_default(self, event): + # send pending data, if any + self.default_send(event.stream_id) + + def send_reset_stream(self): + self._conn.reset_stream(self._stream_id) + self.transport.write(self._conn.data_to_send()) + + def setup_send(self, data_to_send, stream_id): + logging.info('Setting up data to send for stream_id: %d' % stream_id) + self._send_remaining[stream_id] = len(data_to_send) + self._send_offset = 0 + self._data_to_send = data_to_send + self.default_send(stream_id) + + def default_send(self, stream_id): + if not self._send_remaining.has_key(stream_id): + # not setup to send data yet + return + + while self._send_remaining[stream_id] > 0: + lfcw = self._conn.local_flow_control_window(stream_id) + if lfcw == 0: + break + chunk_size = min(lfcw, _READ_CHUNK_SIZE) + bytes_to_send = min(chunk_size, self._send_remaining[stream_id]) + logging.info('flow_control_window = %d. 
sending [%d:%d] stream_id %d' % + (lfcw, self._send_offset, self._send_offset + bytes_to_send, + stream_id)) + data = self._data_to_send[self._send_offset : self._send_offset + bytes_to_send] + try: + self._conn.send_data(stream_id, data, False) + except h2.exceptions.ProtocolError: + logging.info('Stream %d is closed' % stream_id) + break + self._send_remaining[stream_id] -= bytes_to_send + self._send_offset += bytes_to_send + if self._send_remaining[stream_id] == 0: + self._handlers['SendDone'](stream_id) + + def default_ping(self): + logging.info('sending ping') + self._outstanding_pings += 1 + self._conn.ping(b'\x00'*8) + self.transport.write(self._conn.data_to_send()) + + def on_send_done_default(self, stream_id): + if self._stream_status[stream_id]: + self._stream_status[stream_id] = False + self.default_send_trailer(stream_id) + else: + logging.error('Stream %d is already closed' % stream_id) + + def default_send_trailer(self, stream_id): + logging.info('Sending trailer for stream id %d' % stream_id) + self._conn.send_headers(stream_id, + headers=[ ('grpc-status', '0') ], + end_stream=True + ) + self.transport.write(self._conn.data_to_send()) + + @staticmethod + def default_response_data(response_size): + sresp = messages_pb2.SimpleResponse() + sresp.payload.body = b'\x00'*response_size + serialized_resp_proto = sresp.SerializeToString() + response_data = b'\x00' + struct.pack('i', len(serialized_resp_proto))[::-1] + serialized_resp_proto + return response_data + + def parse_received_data(self, stream_id): + """ returns a grpc framed string of bytes containing response proto of the size + asked in request """ + recv_buffer = self._recv_buffer[stream_id] + grpc_msg_size = struct.unpack('i',recv_buffer[1:5][::-1])[0] + if len(recv_buffer) != _GRPC_HEADER_SIZE + grpc_msg_size: + return None + req_proto_str = recv_buffer[5:5+grpc_msg_size] + sr = messages_pb2.SimpleRequest() + sr.ParseFromString(req_proto_str) + logging.info('Parsed request for stream %d: response_size=%s' % (stream_id, sr.response_size)) + return sr diff --git a/test/http2_test/http2_test_server.py b/test/http2_test/http2_test_server.py new file mode 100644 index 0000000000..44e36d34b6 --- /dev/null +++ b/test/http2_test/http2_test_server.py @@ -0,0 +1,90 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""HTTP2 Test Server""" + +import argparse +import logging +import twisted +import twisted.internet +import twisted.internet.endpoints +import twisted.internet.reactor + +import http2_base_server +import test_goaway +import test_max_streams +import test_ping +import test_rst_after_data +import test_rst_after_header +import test_rst_during_data + +_TEST_CASE_MAPPING = { + 'rst_after_header': test_rst_after_header.TestcaseRstStreamAfterHeader, + 'rst_after_data': test_rst_after_data.TestcaseRstStreamAfterData, + 'rst_during_data': test_rst_during_data.TestcaseRstStreamDuringData, + 'goaway': test_goaway.TestcaseGoaway, + 'ping': test_ping.TestcasePing, + 'max_streams': test_max_streams.TestcaseSettingsMaxStreams, +} + +class H2Factory(twisted.internet.protocol.Factory): + def __init__(self, testcase): + logging.info('Creating H2Factory for new connection.') + self._num_streams = 0 + self._testcase = testcase + + def buildProtocol(self, addr): + self._num_streams += 1 + logging.info('New Connection: %d' % self._num_streams) + if not _TEST_CASE_MAPPING.has_key(self._testcase): + logging.error('Unknown test case: %s' % self._testcase) + assert(0) + else: + t = _TEST_CASE_MAPPING[self._testcase] + + if self._testcase == 'goaway': + return t(self._num_streams).get_base_server() + else: + return t().get_base_server() + +if __name__ == '__main__': + logging.basicConfig( + format='%(levelname) -10s %(asctime)s %(module)s:%(lineno)s | %(message)s', + level=logging.INFO) + parser = argparse.ArgumentParser() + parser.add_argument('--test_case', choices=sorted(_TEST_CASE_MAPPING.keys()), + help='test case to run', required=True) + parser.add_argument('--port', type=int, default=8080, + help='port to run the server (default: 8080)') + args = parser.parse_args() + logging.info('Running test case %s on port %d' % (args.test_case, args.port)) + endpoint = twisted.internet.endpoints.TCP4ServerEndpoint( + twisted.internet.reactor, args.port, backlog=128) + endpoint.listen(H2Factory(args.test_case)) + twisted.internet.reactor.run() diff --git a/test/http2_test/messages_pb2.py b/test/http2_test/messages_pb2.py new file mode 100644 index 0000000000..86cf5a8970 --- /dev/null +++ b/test/http2_test/messages_pb2.py @@ -0,0 +1,661 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: messages.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='messages.proto', + package='grpc.testing', + syntax='proto3', + serialized_pb=_b('\n\x0emessages.proto\x12\x0cgrpc.testing\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"@\n\x07Payload\x12\'\n\x04type\x18\x01 \x01(\x0e\x32\x19.grpc.testing.PayloadType\x12\x0c\n\x04\x62ody\x18\x02 \x01(\x0c\"+\n\nEchoStatus\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\"\xce\x02\n\rSimpleRequest\x12\x30\n\rresponse_type\x18\x01 \x01(\x0e\x32\x19.grpc.testing.PayloadType\x12\x15\n\rresponse_size\x18\x02 \x01(\x05\x12&\n\x07payload\x18\x03 \x01(\x0b\x32\x15.grpc.testing.Payload\x12\x15\n\rfill_username\x18\x04 \x01(\x08\x12\x18\n\x10\x66ill_oauth_scope\x18\x05 \x01(\x08\x12\x34\n\x13response_compressed\x18\x06 \x01(\x0b\x32\x17.grpc.testing.BoolValue\x12\x31\n\x0fresponse_status\x18\x07 \x01(\x0b\x32\x18.grpc.testing.EchoStatus\x12\x32\n\x11\x65xpect_compressed\x18\x08 \x01(\x0b\x32\x17.grpc.testing.BoolValue\"_\n\x0eSimpleResponse\x12&\n\x07payload\x18\x01 \x01(\x0b\x32\x15.grpc.testing.Payload\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x13\n\x0boauth_scope\x18\x03 \x01(\t\"w\n\x19StreamingInputCallRequest\x12&\n\x07payload\x18\x01 \x01(\x0b\x32\x15.grpc.testing.Payload\x12\x32\n\x11\x65xpect_compressed\x18\x02 \x01(\x0b\x32\x17.grpc.testing.BoolValue\"=\n\x1aStreamingInputCallResponse\x12\x1f\n\x17\x61ggregated_payload_size\x18\x01 \x01(\x05\"d\n\x12ResponseParameters\x12\x0c\n\x04size\x18\x01 \x01(\x05\x12\x13\n\x0binterval_us\x18\x02 \x01(\x05\x12+\n\ncompressed\x18\x03 \x01(\x0b\x32\x17.grpc.testing.BoolValue\"\xe8\x01\n\x1aStreamingOutputCallRequest\x12\x30\n\rresponse_type\x18\x01 \x01(\x0e\x32\x19.grpc.testing.PayloadType\x12=\n\x13response_parameters\x18\x02 \x03(\x0b\x32 .grpc.testing.ResponseParameters\x12&\n\x07payload\x18\x03 \x01(\x0b\x32\x15.grpc.testing.Payload\x12\x31\n\x0fresponse_status\x18\x07 \x01(\x0b\x32\x18.grpc.testing.EchoStatus\"E\n\x1bStreamingOutputCallResponse\x12&\n\x07payload\x18\x01 \x01(\x0b\x32\x15.grpc.testing.Payload\"3\n\x0fReconnectParams\x12 \n\x18max_reconnect_backoff_ms\x18\x01 \x01(\x05\"3\n\rReconnectInfo\x12\x0e\n\x06passed\x18\x01 \x01(\x08\x12\x12\n\nbackoff_ms\x18\x02 \x03(\x05*\x1f\n\x0bPayloadType\x12\x10\n\x0c\x43OMPRESSABLE\x10\x00\x62\x06proto3') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +_PAYLOADTYPE = _descriptor.EnumDescriptor( + name='PayloadType', + full_name='grpc.testing.PayloadType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='COMPRESSABLE', index=0, number=0, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1303, + serialized_end=1334, +) +_sym_db.RegisterEnumDescriptor(_PAYLOADTYPE) + +PayloadType = enum_type_wrapper.EnumTypeWrapper(_PAYLOADTYPE) +COMPRESSABLE = 0 + + + +_BOOLVALUE = _descriptor.Descriptor( + name='BoolValue', + full_name='grpc.testing.BoolValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='value', full_name='grpc.testing.BoolValue.value', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=32, + serialized_end=58, +) + + +_PAYLOAD = _descriptor.Descriptor( + name='Payload', + full_name='grpc.testing.Payload', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='grpc.testing.Payload.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='body', full_name='grpc.testing.Payload.body', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=60, + serialized_end=124, +) + + +_ECHOSTATUS = _descriptor.Descriptor( + name='EchoStatus', + full_name='grpc.testing.EchoStatus', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='grpc.testing.EchoStatus.code', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='message', full_name='grpc.testing.EchoStatus.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=126, + serialized_end=169, +) + + +_SIMPLEREQUEST = _descriptor.Descriptor( + name='SimpleRequest', + full_name='grpc.testing.SimpleRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='response_type', full_name='grpc.testing.SimpleRequest.response_type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='response_size', full_name='grpc.testing.SimpleRequest.response_size', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='payload', full_name='grpc.testing.SimpleRequest.payload', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='fill_username', full_name='grpc.testing.SimpleRequest.fill_username', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='fill_oauth_scope', full_name='grpc.testing.SimpleRequest.fill_oauth_scope', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='response_compressed', full_name='grpc.testing.SimpleRequest.response_compressed', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='response_status', full_name='grpc.testing.SimpleRequest.response_status', index=6, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='expect_compressed', full_name='grpc.testing.SimpleRequest.expect_compressed', index=7, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=172, + serialized_end=506, +) + + +_SIMPLERESPONSE = _descriptor.Descriptor( + name='SimpleResponse', + full_name='grpc.testing.SimpleResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='payload', full_name='grpc.testing.SimpleResponse.payload', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='username', full_name='grpc.testing.SimpleResponse.username', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='oauth_scope', full_name='grpc.testing.SimpleResponse.oauth_scope', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=508, + serialized_end=603, +) + + +_STREAMINGINPUTCALLREQUEST = _descriptor.Descriptor( + name='StreamingInputCallRequest', + full_name='grpc.testing.StreamingInputCallRequest', + filename=None, + file=DESCRIPTOR, + 
containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='payload', full_name='grpc.testing.StreamingInputCallRequest.payload', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='expect_compressed', full_name='grpc.testing.StreamingInputCallRequest.expect_compressed', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=605, + serialized_end=724, +) + + +_STREAMINGINPUTCALLRESPONSE = _descriptor.Descriptor( + name='StreamingInputCallResponse', + full_name='grpc.testing.StreamingInputCallResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='aggregated_payload_size', full_name='grpc.testing.StreamingInputCallResponse.aggregated_payload_size', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=726, + serialized_end=787, +) + + +_RESPONSEPARAMETERS = _descriptor.Descriptor( + name='ResponseParameters', + full_name='grpc.testing.ResponseParameters', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='size', full_name='grpc.testing.ResponseParameters.size', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='interval_us', full_name='grpc.testing.ResponseParameters.interval_us', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='compressed', full_name='grpc.testing.ResponseParameters.compressed', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=789, + serialized_end=889, +) + + +_STREAMINGOUTPUTCALLREQUEST = _descriptor.Descriptor( + name='StreamingOutputCallRequest', + full_name='grpc.testing.StreamingOutputCallRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='response_type', full_name='grpc.testing.StreamingOutputCallRequest.response_type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='response_parameters', full_name='grpc.testing.StreamingOutputCallRequest.response_parameters', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='payload', full_name='grpc.testing.StreamingOutputCallRequest.payload', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='response_status', full_name='grpc.testing.StreamingOutputCallRequest.response_status', index=3, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=892, + serialized_end=1124, +) + + +_STREAMINGOUTPUTCALLRESPONSE = _descriptor.Descriptor( + name='StreamingOutputCallResponse', + full_name='grpc.testing.StreamingOutputCallResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='payload', full_name='grpc.testing.StreamingOutputCallResponse.payload', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1126, + serialized_end=1195, +) + + +_RECONNECTPARAMS = _descriptor.Descriptor( + name='ReconnectParams', + full_name='grpc.testing.ReconnectParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='max_reconnect_backoff_ms', full_name='grpc.testing.ReconnectParams.max_reconnect_backoff_ms', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1197, + serialized_end=1248, +) + + +_RECONNECTINFO = _descriptor.Descriptor( + name='ReconnectInfo', + full_name='grpc.testing.ReconnectInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='passed', full_name='grpc.testing.ReconnectInfo.passed', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='backoff_ms', full_name='grpc.testing.ReconnectInfo.backoff_ms', index=1, + number=2, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1250, + serialized_end=1301, +) + +_PAYLOAD.fields_by_name['type'].enum_type = _PAYLOADTYPE +_SIMPLEREQUEST.fields_by_name['response_type'].enum_type = _PAYLOADTYPE +_SIMPLEREQUEST.fields_by_name['payload'].message_type = _PAYLOAD +_SIMPLEREQUEST.fields_by_name['response_compressed'].message_type = _BOOLVALUE +_SIMPLEREQUEST.fields_by_name['response_status'].message_type = _ECHOSTATUS +_SIMPLEREQUEST.fields_by_name['expect_compressed'].message_type = _BOOLVALUE +_SIMPLERESPONSE.fields_by_name['payload'].message_type = _PAYLOAD +_STREAMINGINPUTCALLREQUEST.fields_by_name['payload'].message_type = _PAYLOAD +_STREAMINGINPUTCALLREQUEST.fields_by_name['expect_compressed'].message_type = _BOOLVALUE +_RESPONSEPARAMETERS.fields_by_name['compressed'].message_type = _BOOLVALUE +_STREAMINGOUTPUTCALLREQUEST.fields_by_name['response_type'].enum_type = _PAYLOADTYPE +_STREAMINGOUTPUTCALLREQUEST.fields_by_name['response_parameters'].message_type = _RESPONSEPARAMETERS +_STREAMINGOUTPUTCALLREQUEST.fields_by_name['payload'].message_type = _PAYLOAD +_STREAMINGOUTPUTCALLREQUEST.fields_by_name['response_status'].message_type = _ECHOSTATUS +_STREAMINGOUTPUTCALLRESPONSE.fields_by_name['payload'].message_type = _PAYLOAD +DESCRIPTOR.message_types_by_name['BoolValue'] = _BOOLVALUE +DESCRIPTOR.message_types_by_name['Payload'] = _PAYLOAD +DESCRIPTOR.message_types_by_name['EchoStatus'] = _ECHOSTATUS +DESCRIPTOR.message_types_by_name['SimpleRequest'] = _SIMPLEREQUEST +DESCRIPTOR.message_types_by_name['SimpleResponse'] = _SIMPLERESPONSE +DESCRIPTOR.message_types_by_name['StreamingInputCallRequest'] = _STREAMINGINPUTCALLREQUEST +DESCRIPTOR.message_types_by_name['StreamingInputCallResponse'] = _STREAMINGINPUTCALLRESPONSE +DESCRIPTOR.message_types_by_name['ResponseParameters'] = _RESPONSEPARAMETERS +DESCRIPTOR.message_types_by_name['StreamingOutputCallRequest'] = _STREAMINGOUTPUTCALLREQUEST +DESCRIPTOR.message_types_by_name['StreamingOutputCallResponse'] = _STREAMINGOUTPUTCALLRESPONSE +DESCRIPTOR.message_types_by_name['ReconnectParams'] = _RECONNECTPARAMS +DESCRIPTOR.message_types_by_name['ReconnectInfo'] = _RECONNECTINFO +DESCRIPTOR.enum_types_by_name['PayloadType'] = _PAYLOADTYPE + +BoolValue = _reflection.GeneratedProtocolMessageType('BoolValue', (_message.Message,), dict( + DESCRIPTOR = _BOOLVALUE, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.BoolValue) + )) +_sym_db.RegisterMessage(BoolValue) + +Payload = _reflection.GeneratedProtocolMessageType('Payload', (_message.Message,), dict( + DESCRIPTOR = _PAYLOAD, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.Payload) + )) +_sym_db.RegisterMessage(Payload) + +EchoStatus = _reflection.GeneratedProtocolMessageType('EchoStatus', (_message.Message,), dict( + DESCRIPTOR = _ECHOSTATUS, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.EchoStatus) + )) +_sym_db.RegisterMessage(EchoStatus) + +SimpleRequest = _reflection.GeneratedProtocolMessageType('SimpleRequest', (_message.Message,), dict( + DESCRIPTOR = _SIMPLEREQUEST, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.SimpleRequest) + )) +_sym_db.RegisterMessage(SimpleRequest) + +SimpleResponse = 
_reflection.GeneratedProtocolMessageType('SimpleResponse', (_message.Message,), dict( + DESCRIPTOR = _SIMPLERESPONSE, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.SimpleResponse) + )) +_sym_db.RegisterMessage(SimpleResponse) + +StreamingInputCallRequest = _reflection.GeneratedProtocolMessageType('StreamingInputCallRequest', (_message.Message,), dict( + DESCRIPTOR = _STREAMINGINPUTCALLREQUEST, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.StreamingInputCallRequest) + )) +_sym_db.RegisterMessage(StreamingInputCallRequest) + +StreamingInputCallResponse = _reflection.GeneratedProtocolMessageType('StreamingInputCallResponse', (_message.Message,), dict( + DESCRIPTOR = _STREAMINGINPUTCALLRESPONSE, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.StreamingInputCallResponse) + )) +_sym_db.RegisterMessage(StreamingInputCallResponse) + +ResponseParameters = _reflection.GeneratedProtocolMessageType('ResponseParameters', (_message.Message,), dict( + DESCRIPTOR = _RESPONSEPARAMETERS, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.ResponseParameters) + )) +_sym_db.RegisterMessage(ResponseParameters) + +StreamingOutputCallRequest = _reflection.GeneratedProtocolMessageType('StreamingOutputCallRequest', (_message.Message,), dict( + DESCRIPTOR = _STREAMINGOUTPUTCALLREQUEST, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.StreamingOutputCallRequest) + )) +_sym_db.RegisterMessage(StreamingOutputCallRequest) + +StreamingOutputCallResponse = _reflection.GeneratedProtocolMessageType('StreamingOutputCallResponse', (_message.Message,), dict( + DESCRIPTOR = _STREAMINGOUTPUTCALLRESPONSE, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.StreamingOutputCallResponse) + )) +_sym_db.RegisterMessage(StreamingOutputCallResponse) + +ReconnectParams = _reflection.GeneratedProtocolMessageType('ReconnectParams', (_message.Message,), dict( + DESCRIPTOR = _RECONNECTPARAMS, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.ReconnectParams) + )) +_sym_db.RegisterMessage(ReconnectParams) + +ReconnectInfo = _reflection.GeneratedProtocolMessageType('ReconnectInfo', (_message.Message,), dict( + DESCRIPTOR = _RECONNECTINFO, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.ReconnectInfo) + )) +_sym_db.RegisterMessage(ReconnectInfo) + + +# @@protoc_insertion_point(module_scope) diff --git a/test/http2_test/test_goaway.py b/test/http2_test/test_goaway.py new file mode 100644 index 0000000000..61f4beb74a --- /dev/null +++ b/test/http2_test/test_goaway.py @@ -0,0 +1,77 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging +import time + +import http2_base_server + +class TestcaseGoaway(object): + """ + This test does the following: + Process incoming request normally, i.e. send headers, data and trailers. + Then send a GOAWAY frame with the stream id of the processed request. + It checks that the next request is made on a different TCP connection. + """ + def __init__(self, iteration): + self._base_server = http2_base_server.H2ProtocolBaseServer() + self._base_server._handlers['RequestReceived'] = self.on_request_received + self._base_server._handlers['DataReceived'] = self.on_data_received + self._base_server._handlers['SendDone'] = self.on_send_done + self._base_server._handlers['ConnectionLost'] = self.on_connection_lost + self._ready_to_send = False + self._iteration = iteration + + def get_base_server(self): + return self._base_server + + def on_connection_lost(self, reason): + logging.info('Disconnect received. Count %d' % self._iteration) + # _iteration == 2 => Two different connections have been used. + if self._iteration == 2: + self._base_server.on_connection_lost(reason) + + def on_send_done(self, stream_id): + self._base_server.on_send_done_default(stream_id) + logging.info('Sending GOAWAY for stream %d:' % stream_id) + self._base_server._conn.close_connection(error_code=0, additional_data=None, last_stream_id=stream_id) + self._base_server._stream_status[stream_id] = False + + def on_request_received(self, event): + self._ready_to_send = False + self._base_server.on_request_received_default(event) + + def on_data_received(self, event): + self._base_server.on_data_received_default(event) + sr = self._base_server.parse_received_data(event.stream_id) + if sr: + logging.info('Creating response size = %s' % sr.response_size) + response_data = self._base_server.default_response_data(sr.response_size) + self._ready_to_send = True + self._base_server.setup_send(response_data, event.stream_id) diff --git a/test/http2_test/test_max_streams.py b/test/http2_test/test_max_streams.py new file mode 100644 index 0000000000..9942b1bb9a --- /dev/null +++ b/test/http2_test/test_max_streams.py @@ -0,0 +1,63 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import hyperframe.frame +import logging + +import http2_base_server + +class TestcaseSettingsMaxStreams(object): + """ + This test sets MAX_CONCURRENT_STREAMS to 1 and asserts that at any point + only 1 stream is active. + """ + def __init__(self): + self._base_server = http2_base_server.H2ProtocolBaseServer() + self._base_server._handlers['DataReceived'] = self.on_data_received + self._base_server._handlers['ConnectionMade'] = self.on_connection_made + + def get_base_server(self): + return self._base_server + + def on_connection_made(self): + logging.info('Connection Made') + self._base_server._conn.initiate_connection() + self._base_server._conn.update_settings( + {hyperframe.frame.SettingsFrame.MAX_CONCURRENT_STREAMS: 1}) + self._base_server.transport.setTcpNoDelay(True) + self._base_server.transport.write(self._base_server._conn.data_to_send()) + + def on_data_received(self, event): + self._base_server.on_data_received_default(event) + sr = self._base_server.parse_received_data(event.stream_id) + if sr: + logging.info('Creating response of size = %s' % sr.response_size) + response_data = self._base_server.default_response_data(sr.response_size) + self._base_server.setup_send(response_data, event.stream_id) + # TODO (makdharma): Add assertion to check number of live streams diff --git a/test/http2_test/test_ping.py b/test/http2_test/test_ping.py new file mode 100644 index 0000000000..da41fd01bb --- /dev/null +++ b/test/http2_test/test_ping.py @@ -0,0 +1,67 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging + +import http2_base_server + +class TestcasePing(object): + """ + This test injects PING frames before and after header and data. Keeps count + of outstanding ping response and asserts when the count is non-zero at the + end of the test. + """ + def __init__(self): + self._base_server = http2_base_server.H2ProtocolBaseServer() + self._base_server._handlers['RequestReceived'] = self.on_request_received + self._base_server._handlers['DataReceived'] = self.on_data_received + self._base_server._handlers['ConnectionLost'] = self.on_connection_lost + + def get_base_server(self): + return self._base_server + + def on_request_received(self, event): + self._base_server.default_ping() + self._base_server.on_request_received_default(event) + self._base_server.default_ping() + + def on_data_received(self, event): + self._base_server.on_data_received_default(event) + sr = self._base_server.parse_received_data(event.stream_id) + if sr: + logging.info('Creating response size = %s' % sr.response_size) + response_data = self._base_server.default_response_data(sr.response_size) + self._base_server.default_ping() + self._base_server.setup_send(response_data, event.stream_id) + self._base_server.default_ping() + + def on_connection_lost(self, reason): + logging.info('Disconnect received. Ping Count %d' % self._base_server._outstanding_pings) + assert(self._base_server._outstanding_pings == 0) + self._base_server.on_connection_lost(reason) diff --git a/test/http2_test/test_rst_after_data.py b/test/http2_test/test_rst_after_data.py new file mode 100644 index 0000000000..9236025395 --- /dev/null +++ b/test/http2_test/test_rst_after_data.py @@ -0,0 +1,57 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import http2_base_server + +class TestcaseRstStreamAfterData(object): + """ + In response to an incoming request, this test sends headers, followed by + data, followed by a reset stream frame. Client asserts that the RPC failed. + Client needs to deliver the complete message to the application layer. + """ + def __init__(self): + self._base_server = http2_base_server.H2ProtocolBaseServer() + self._base_server._handlers['DataReceived'] = self.on_data_received + self._base_server._handlers['SendDone'] = self.on_send_done + + def get_base_server(self): + return self._base_server + + def on_data_received(self, event): + self._base_server.on_data_received_default(event) + sr = self._base_server.parse_received_data(event.stream_id) + if sr: + response_data = self._base_server.default_response_data(sr.response_size) + self._ready_to_send = True + self._base_server.setup_send(response_data, event.stream_id) + # send reset stream + + def on_send_done(self, stream_id): + self._base_server.send_reset_stream() + self._base_server._stream_status[stream_id] = False diff --git a/test/http2_test/test_rst_after_header.py b/test/http2_test/test_rst_after_header.py new file mode 100644 index 0000000000..41e1adb8ad --- /dev/null +++ b/test/http2_test/test_rst_after_header.py @@ -0,0 +1,48 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import http2_base_server + +class TestcaseRstStreamAfterHeader(object): + """ + In response to an incoming request, this test sends headers, followed by + a reset stream frame. 
Client asserts that the RPC failed. + """ + def __init__(self): + self._base_server = http2_base_server.H2ProtocolBaseServer() + self._base_server._handlers['RequestReceived'] = self.on_request_received + + def get_base_server(self): + return self._base_server + + def on_request_received(self, event): + # send initial headers + self._base_server.on_request_received_default(event) + # send reset stream + self._base_server.send_reset_stream() diff --git a/test/http2_test/test_rst_during_data.py b/test/http2_test/test_rst_during_data.py new file mode 100644 index 0000000000..7c859db267 --- /dev/null +++ b/test/http2_test/test_rst_during_data.py @@ -0,0 +1,58 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import http2_base_server + +class TestcaseRstStreamDuringData(object): + """ + In response to an incoming request, this test sends headers, followed by + some data, followed by a reset stream frame. Client asserts that the RPC + failed and does not deliver the message to the application. 
+ """ + def __init__(self): + self._base_server = http2_base_server.H2ProtocolBaseServer() + self._base_server._handlers['DataReceived'] = self.on_data_received + self._base_server._handlers['SendDone'] = self.on_send_done + + def get_base_server(self): + return self._base_server + + def on_data_received(self, event): + self._base_server.on_data_received_default(event) + sr = self._base_server.parse_received_data(event.stream_id) + if sr: + response_data = self._base_server.default_response_data(sr.response_size) + self._ready_to_send = True + response_len = len(response_data) + truncated_response_data = response_data[0:response_len/2] + self._base_server.setup_send(truncated_response_data, event.stream_id) + + def on_send_done(self, stream_id): + self._base_server.send_reset_stream() + self._base_server._stream_status[stream_id] = False diff --git a/third_party/google_benchmark b/third_party/benchmark -Subproject 44c25c892a6229b20db7cd9dc05584ea865896d +Subproject 44c25c892a6229b20db7cd9dc05584ea865896d diff --git a/tools/buildgen/generate_build_additions.sh b/tools/buildgen/generate_build_additions.sh index 1ea47042f4..a4373ed350 100644 --- a/tools/buildgen/generate_build_additions.sh +++ b/tools/buildgen/generate_build_additions.sh @@ -28,9 +28,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +set -e + gen_build_yaml_dirs=" \ src/boringssl \ - src/google_benchmark \ + src/benchmark \ src/proto \ src/zlib \ test/core/bad_client \ diff --git a/tools/buildgen/generate_projects.py b/tools/buildgen/generate_projects.py index 5e78ad52d6..f8ddaf4963 100755 --- a/tools/buildgen/generate_projects.py +++ b/tools/buildgen/generate_projects.py @@ -36,7 +36,7 @@ import shutil import sys import tempfile import multiprocessing -sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', 'run_tests')) +sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', 'run_tests', 'python_utils')) assert sys.argv[1:], 'run generate_projects.sh instead of this directly' diff --git a/tools/codegen/core/gen_nano_proto.sh b/tools/codegen/core/gen_nano_proto.sh index df107c208f..99e49814b8 100755 --- a/tools/codegen/core/gen_nano_proto.sh +++ b/tools/codegen/core/gen_nano_proto.sh @@ -42,46 +42,6 @@ # 4: Output dir not an absolute path. # 5: Couldn't create output directory (2nd argument). -read -r -d '' COPYRIGHT <<'EOF' -/* - * - * Copyright <YEAR>, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -EOF - -CURRENT_YEAR=$(date +%Y) -COPYRIGHT_FILE=$(mktemp) -echo "${COPYRIGHT/<YEAR>/$CURRENT_YEAR}" > $COPYRIGHT_FILE - set -ex if [ $# -lt 2 ] || [ $# -gt 3 ]; then echo "Usage: $0 <input.proto> <absolute path to output dir> [grpc path]" @@ -143,13 +103,6 @@ readonly UC_PROTO_BASENAME=`echo $PROTO_BASENAME | tr [a-z] [A-Z]` sed -i "s:PB_${UC_PROTO_BASENAME}_PB_H_INCLUDED:GRPC_${INCLUDE_GUARD_BASE}_${UC_PROTO_BASENAME}_PB_H:g" \ "$OUTPUT_DIR/$PROTO_BASENAME.pb.h" -# prepend copyright -TMPFILE=$(mktemp) -cat $COPYRIGHT_FILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.c" > $TMPFILE -mv -v $TMPFILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.c" -cat $COPYRIGHT_FILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.h" > $TMPFILE -mv -v $TMPFILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.h" - deactivate rm -rf $VENV_DIR diff --git a/tools/distrib/check_copyright.py b/tools/distrib/check_copyright.py index f06e5f1d1a..718bb563f3 100755 --- a/tools/distrib/check_copyright.py +++ b/tools/distrib/check_copyright.py @@ -92,9 +92,23 @@ LICENSE_PREFIX = { 'LICENSE': '', } -KNOWN_BAD = set([ +_EXEMPT = frozenset(( + # Generated protocol compiler output. + 'examples/python/helloworld/helloworld_pb2.py', + 'examples/python/helloworld/helloworld_pb2_grpc.py', + 'examples/python/multiplex/helloworld_pb2.py', + 'examples/python/multiplex/helloworld_pb2_grpc.py', + 'examples/python/multiplex/route_guide_pb2.py', + 'examples/python/multiplex/route_guide_pb2_grpc.py', + 'examples/python/route_guide/route_guide_pb2.py', + 'examples/python/route_guide/route_guide_pb2_grpc.py', + + 'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h', + 'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c', + + # An older file originally from outside gRPC. 'src/php/tests/bootstrap.php', -]) +)) RE_YEAR = r'Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+), Google Inc\.' 
@@ -140,7 +154,8 @@ except subprocess.CalledProcessError: sys.exit(0) for filename in filename_list: - if filename in KNOWN_BAD: continue + if filename in _EXEMPT: + continue ext = os.path.splitext(filename)[1] base = os.path.basename(filename) if ext in RE_LICENSE: diff --git a/tools/distrib/check_nanopb_output.sh b/tools/distrib/check_nanopb_output.sh index c0707051a6..eb64e23daf 100755 --- a/tools/distrib/check_nanopb_output.sh +++ b/tools/distrib/check_nanopb_output.sh @@ -37,7 +37,7 @@ readonly PROTOBUF_INSTALL_PREFIX="$(mktemp -d)" pushd third_party/protobuf ./autogen.sh ./configure --prefix="$PROTOBUF_INSTALL_PREFIX" -make +make -j 8 make install #ldconfig popd @@ -51,7 +51,7 @@ fi # stack up and change to nanopb's proto generator directory pushd third_party/nanopb/generator/proto export PATH="$PROTOC_BIN_PATH:$PATH" -make +make -j 8 # back to the root directory popd diff --git a/tools/distrib/clang_format_code.sh b/tools/distrib/clang_format_code.sh index 858e074898..13e018709f 100755 --- a/tools/distrib/clang_format_code.sh +++ b/tools/distrib/clang_format_code.sh @@ -32,9 +32,15 @@ set -ex # change to root directory cd $(dirname $0)/../.. +REPO_ROOT=$(pwd) -# build clang-format docker image -docker build -t grpc_clang_format tools/dockerfile/grpc_clang_format +if [ "$CLANG_FORMAT_SKIP_DOCKER" == "" ] +then + # build clang-format docker image + docker build -t grpc_clang_format tools/dockerfile/grpc_clang_format -# run clang-format against the checked out codebase -docker run -e TEST=$TEST -e CHANGED_FILES="$CHANGED_FILES" --rm=true -v ${HOST_GIT_ROOT:-`pwd`}:/local-code -t grpc_clang_format /clang_format_all_the_things.sh + # run clang-format against the checked out codebase + docker run -e TEST=$TEST -e CHANGED_FILES="$CHANGED_FILES" -e CLANG_FORMAT_ROOT="/local-code" --rm=true -v "${REPO_ROOT}":/local-code -t grpc_clang_format /clang_format_all_the_things.sh +else + CLANG_FORMAT_ROOT="${REPO_ROOT}" tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh +fi diff --git a/tools/distrib/python/docgen.py b/tools/distrib/python/docgen.py index 622317920d..38ffcd6e0e 100755 --- a/tools/distrib/python/docgen.py +++ b/tools/distrib/python/docgen.py @@ -94,6 +94,7 @@ if args.submit: # specified repository, edit it, and push it. It's up to the user to then go # onto GitHub and make a PR against grpc/grpc:gh-pages. 
repo_parent_dir = tempfile.mkdtemp() + print('Documentation parent directory: {}'.format(repo_parent_dir)) repo_dir = os.path.join(repo_parent_dir, 'grpc') python_doc_dir = os.path.join(repo_dir, 'python') doc_branch = args.doc_branch diff --git a/tools/distrib/python/grpcio_tools/MANIFEST.in b/tools/distrib/python/grpcio_tools/MANIFEST.in index 7712834d64..11ce367747 100644 --- a/tools/distrib/python/grpcio_tools/MANIFEST.in +++ b/tools/distrib/python/grpcio_tools/MANIFEST.in @@ -2,6 +2,6 @@ include grpc_version.py include protoc_deps.py include protoc_lib_deps.py include README.rst -graft grpc +graft grpc_tools graft grpc_root graft third_party diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/__init__.py b/tools/distrib/python/grpcio_tools/grpc_tools/__init__.py index d5ad73a74a..d5ad73a74a 100644 --- a/tools/distrib/python/grpcio_tools/grpc/tools/__init__.py +++ b/tools/distrib/python/grpcio_tools/grpc_tools/__init__.py diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/_protoc_compiler.pyx b/tools/distrib/python/grpcio_tools/grpc_tools/_protoc_compiler.pyx index a6530127c0..81034fad5e 100644 --- a/tools/distrib/python/grpcio_tools/grpc/tools/_protoc_compiler.pyx +++ b/tools/distrib/python/grpcio_tools/grpc_tools/_protoc_compiler.pyx @@ -29,7 +29,7 @@ from libc cimport stdlib -cdef extern from "grpc/tools/main.h": +cdef extern from "grpc_tools/main.h": int protoc_main(int argc, char *argv[]) def run_main(list args not None): diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/command.py b/tools/distrib/python/grpcio_tools/grpc_tools/command.py index 424fd90411..31b3331a66 100644 --- a/tools/distrib/python/grpcio_tools/grpc/tools/command.py +++ b/tools/distrib/python/grpcio_tools/grpc_tools/command.py @@ -33,7 +33,7 @@ import sys import setuptools -from grpc.tools import protoc +from grpc_tools import protoc def build_package_protos(package_root): @@ -45,11 +45,11 @@ def build_package_protos(package_root): proto_files.append(os.path.abspath(os.path.join(root, filename))) well_known_protos_include = pkg_resources.resource_filename( - 'grpc.tools', '_proto') + 'grpc_tools', '_proto') for proto_file in proto_files: command = [ - 'grpc.tools.protoc', + 'grpc_tools.protoc', '--proto_path={}'.format(inclusion_root), '--proto_path={}'.format(well_known_protos_include), '--python_out={}'.format(inclusion_root), diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/main.cc b/tools/distrib/python/grpcio_tools/grpc_tools/main.cc index 8391839513..0c2fa3180a 100644 --- a/tools/distrib/python/grpcio_tools/grpc/tools/main.cc +++ b/tools/distrib/python/grpcio_tools/grpc_tools/main.cc @@ -32,7 +32,7 @@ #include "src/compiler/python_generator.h" -#include "grpc/tools/main.h" +#include "grpc_tools/main.h" int protoc_main(int argc, char* argv[]) { google::protobuf::compiler::CommandLineInterface cli; diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/main.h b/tools/distrib/python/grpcio_tools/grpc_tools/main.h index ea2860ff02..ea2860ff02 100644 --- a/tools/distrib/python/grpcio_tools/grpc/tools/main.h +++ b/tools/distrib/python/grpcio_tools/grpc_tools/main.h diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/protoc.py b/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py index e1256a7dd9..63fddb2f06 100644 --- a/tools/distrib/python/grpcio_tools/grpc/tools/protoc.py +++ b/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py @@ -32,7 +32,7 @@ import pkg_resources import sys -from grpc.tools import _protoc_compiler +from grpc_tools import _protoc_compiler def 
main(command_arguments): """Run the protocol buffer compiler with the given command-line arguments. @@ -45,5 +45,5 @@ def main(command_arguments): return _protoc_compiler.run_main(command_arguments) if __name__ == '__main__': - proto_include = pkg_resources.resource_filename('grpc.tools', '_proto') + proto_include = pkg_resources.resource_filename('grpc_tools', '_proto') sys.exit(main(sys.argv + ['-I{}'.format(proto_include)])) diff --git a/tools/distrib/python/grpcio_tools/setup.py b/tools/distrib/python/grpcio_tools/setup.py index a07a586fb2..502d7ef27b 100644 --- a/tools/distrib/python/grpcio_tools/setup.py +++ b/tools/distrib/python/grpcio_tools/setup.py @@ -108,7 +108,7 @@ PROTO_FILES = [ CC_INCLUDE = os.path.normpath(protoc_lib_deps.CC_INCLUDE) PROTO_INCLUDE = os.path.normpath(protoc_lib_deps.PROTO_INCLUDE) -GRPC_PYTHON_TOOLS_PACKAGE = 'grpc.tools' +GRPC_PYTHON_TOOLS_PACKAGE = 'grpc_tools' GRPC_PYTHON_PROTO_RESOURCES_NAME = '_proto' DEFINE_MACROS = () @@ -154,16 +154,16 @@ def package_data(): def extension_modules(): if BUILD_WITH_CYTHON: - plugin_sources = [os.path.join('grpc', 'tools', '_protoc_compiler.pyx')] + plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.pyx')] else: - plugin_sources = [os.path.join('grpc', 'tools', '_protoc_compiler.cpp')] + plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.cpp')] plugin_sources += [ - os.path.join('grpc', 'tools', 'main.cc'), + os.path.join('grpc_tools', 'main.cc'), os.path.join('grpc_root', 'src', 'compiler', 'python_generator.cc')] + [ os.path.join(CC_INCLUDE, cc_file) for cc_file in CC_FILES] plugin_ext = extension.Extension( - name='grpc.tools._protoc_compiler', + name='grpc_tools._protoc_compiler', sources=plugin_sources, include_dirs=[ '.', @@ -184,12 +184,11 @@ def extension_modules(): return extensions setuptools.setup( - name='grpcio_tools', + name='grpcio-tools', version=grpc_version.VERSION, license='3-clause BSD', ext_modules=extension_modules(), packages=setuptools.find_packages('.'), - namespace_packages=['grpc'], install_requires=[ 'protobuf>=3.0.0', 'grpcio>={version}'.format(version=grpc_version.VERSION), diff --git a/tools/dockerfile/grpc_artifact_protoc/Dockerfile b/tools/dockerfile/grpc_artifact_protoc/Dockerfile index 1bbc6e021b..2904a8fa51 100644 --- a/tools/dockerfile/grpc_artifact_protoc/Dockerfile +++ b/tools/dockerfile/grpc_artifact_protoc/Dockerfile @@ -59,5 +59,11 @@ RUN yum install -y devtoolset-1.1 \ devtoolset-1.1-libstdc++-devel \ devtoolset-1.1-libstdc++-devel.i686 || true +# Update Git to version >1.7 to allow cloning submodules with --reference arg. 
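The grpc.tools to grpc_tools rename above also applies to the public protoc entry point. A hedged usage sketch of the renamed module, following the pattern of protoc.py's __main__ branch shown earlier; the file name hypothetical_service.proto and the current-directory outputs are only illustrative:

import pkg_resources
from grpc_tools import protoc

# Bundled well-known .proto files ship inside the grpc_tools package.
proto_include = pkg_resources.resource_filename('grpc_tools', '_proto')

protoc.main([
    'grpc_tools.protoc',             # argv[0]-style program name, as in command.py
    '-I.',
    '-I{}'.format(proto_include),
    '--python_out=.',
    '--grpc_python_out=.',
    'hypothetical_service.proto',
])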
+RUN yum remove -y git +RUN yum install -y epel-release +RUN yum install -y https://centos6.iuscommunity.org/ius-release.rpm +RUN yum install -y git2u + # Start in devtoolset environment that uses GCC 4.7 CMD ["scl", "enable", "devtoolset-1.1", "bash"] diff --git a/tools/dockerfile/grpc_artifact_python_manylinux_x64/Dockerfile b/tools/dockerfile/grpc_artifact_python_manylinux_x64/Dockerfile index 1d4e8e1a4a..69e624aa41 100644 --- a/tools/dockerfile/grpc_artifact_python_manylinux_x64/Dockerfile +++ b/tools/dockerfile/grpc_artifact_python_manylinux_x64/Dockerfile @@ -34,6 +34,19 @@ FROM quay.io/pypa/manylinux1_x86_64 # Update the package manager RUN yum update -y +############################################################# +# Update Git to allow cloning submodules with --reference arg +RUN yum remove -y git +RUN yum install -y curl-devel expat-devel gettext-devel openssl-devel zlib-devel gcc +RUN cd /usr/src && \ + wget https://kernel.org/pub/software/scm/git/git-2.0.5.tar.gz && \ + tar xzf git-2.0.5.tar.gz +RUN cd /usr/src/git-2.0.5 && \ + make prefix=/usr/local/git all && \ + make prefix=/usr/local/git install +ENV PATH /usr/local/git/bin:$PATH +RUN source /etc/bashrc + ################################### # Install Python build requirements RUN /opt/python/cp27-cp27m/bin/pip install cython diff --git a/tools/dockerfile/grpc_artifact_python_manylinux_x86/Dockerfile b/tools/dockerfile/grpc_artifact_python_manylinux_x86/Dockerfile index 810499695e..9af80078ed 100644 --- a/tools/dockerfile/grpc_artifact_python_manylinux_x86/Dockerfile +++ b/tools/dockerfile/grpc_artifact_python_manylinux_x86/Dockerfile @@ -34,6 +34,19 @@ FROM quay.io/pypa/manylinux1_i686 # Update the package manager RUN yum update -y +############################################################# +# Update Git to allow cloning submodules with --reference arg +RUN yum remove -y git +RUN yum install -y curl-devel expat-devel gettext-devel openssl-devel zlib-devel gcc +RUN cd /usr/src && \ + wget https://kernel.org/pub/software/scm/git/git-2.0.5.tar.gz && \ + tar xzf git-2.0.5.tar.gz +RUN cd /usr/src/git-2.0.5 && \ + make prefix=/usr/local/git all && \ + make prefix=/usr/local/git install +ENV PATH /usr/local/git/bin:$PATH +RUN source /etc/bashrc + ################################### # Install Python build requirements RUN /opt/python/cp27-cp27m/bin/pip install cython diff --git a/tools/dockerfile/grpc_clang_format/Dockerfile b/tools/dockerfile/grpc_clang_format/Dockerfile index ab58017a02..85f5e4db74 100644 --- a/tools/dockerfile/grpc_clang_format/Dockerfile +++ b/tools/dockerfile/grpc_clang_format/Dockerfile @@ -27,13 +27,13 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
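The Dockerfile hunks above all exist for the same reason: the stock CentOS git is too old for cloning submodules with --reference, so the artifact images install a newer git (git2u via IUS, or git 2.0.5 built from source). A hedged sketch, not part of the repo, of the kind of version guard a build script could run before relying on --reference:

import re
import subprocess
import sys

version = subprocess.check_output(['git', '--version']).decode()
match = re.search(r'(\d+)\.(\d+)', version)
major, minor = (int(match.group(1)), int(match.group(2))) if match else (0, 0)
# The Dockerfile comment asks for git newer than 1.7 for --reference clones.
if (major, minor) <= (1, 7):
    sys.exit('git newer than 1.7 is required for submodule --reference clones')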
-FROM ubuntu:wily -RUN apt-get update -RUN apt-get -y install wget +FROM ubuntu:15.10 + +RUN apt-get update && apt-get -y install wget RUN echo deb http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list RUN echo deb-src http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key| apt-key add - -RUN apt-get update -RUN apt-get -y install clang-format-3.8 +RUN apt-get update && apt-get -y install clang-format-3.8 + ADD clang_format_all_the_things.sh / CMD ["echo 'Run with tools/distrib/clang_format_code.sh'"] diff --git a/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh b/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh index 462c65ab5e..c6e4aabfe6 100755 --- a/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh +++ b/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh @@ -44,7 +44,7 @@ for dir in $DIRS do for glob in $GLOB do - files="$files `find /local-code/$dir -name $glob -and -not -name *.generated.* -and -not -name *.pb.h -and -not -name *.pb.c -and -not -name *.pb.cc`" + files="$files `find ${CLANG_FORMAT_ROOT}/$dir -name $glob -and -not -name *.generated.* -and -not -name *.pb.h -and -not -name *.pb.c -and -not -name *.pb.cc`" done done @@ -54,7 +54,7 @@ if [ -n "$CHANGED_FILES" ]; then files=$(comm -12 <(echo $files | tr ' ' '\n' | sort -u) <(echo $CHANGED_FILES | tr ' ' '\n' | sort -u)) fi -if [ "x$TEST" = "x" ] +if [ "$TEST" == "" ] then echo $files | xargs $CLANG_FORMAT -i else diff --git a/tools/dockerfile/test/sanity/Dockerfile b/tools/dockerfile/test/sanity/Dockerfile index 6b19ac845b..811384fda1 100644 --- a/tools/dockerfile/test/sanity/Dockerfile +++ b/tools/dockerfile/test/sanity/Dockerfile @@ -97,17 +97,27 @@ RUN apt-get install -y openjdk-8-jdk # ./compile.sh without a local protoc dependency # TODO(mattkwong): install dependencies to support latest Bazel version if newer # version is needed -RUN git clone https://github.com/bazelbuild/bazel.git /bazel && cd /bazel && git checkout tags/0.4.1 && ./compile.sh +RUN git clone https://github.com/bazelbuild/bazel.git /bazel && \ + cd /bazel && git checkout tags/0.4.1 && ./compile.sh RUN ln -s /bazel/output/bazel /bin/ -#=================== -# Docker "inception" -# Note this is quite the ugly hack. -# This makes sure that the docker binary we inject has its dependencies. -RUN curl https://get.docker.com/ | sh -RUN apt-get remove --purge -y docker-engine +RUN apt-get update && apt-get -y install wget +RUN echo deb http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list +RUN echo deb-src http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list +RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key| apt-key add - +RUN apt-get update && apt-get -y install clang-format-3.8 + +# Prepare ccache +RUN ln -s /usr/bin/ccache /usr/local/bin/gcc +RUN ln -s /usr/bin/ccache /usr/local/bin/g++ +RUN ln -s /usr/bin/ccache /usr/local/bin/cc +RUN ln -s /usr/bin/ccache /usr/local/bin/c++ +RUN ln -s /usr/bin/ccache /usr/local/bin/clang +RUN ln -s /usr/bin/ccache /usr/local/bin/clang++ + RUN mkdir /var/local/jenkins + # Define the default command. 
CMD ["bash"] diff --git a/tools/gce/linux_performance_worker_init.sh b/tools/gce/linux_performance_worker_init.sh index 523749ee81..ab29e015e0 100755 --- a/tools/gce/linux_performance_worker_init.sh +++ b/tools/gce/linux_performance_worker_init.sh @@ -150,3 +150,19 @@ sudo tar -C /usr/local -xzf go$GO_VERSION.$OS-$ARCH.tar.gz # Put go on the PATH, keep the usual installation dir sudo ln -s /usr/local/go/bin/go /usr/bin/go rm go$GO_VERSION.$OS-$ARCH.tar.gz + +# Install perf, to profile benchmarks. (need to get the right linux-tools-<> for kernel version) +sudo apt-get install -y linux-tools-common linux-tools-generic linux-tools-`uname -r` +# see http://unix.stackexchange.com/questions/14227/do-i-need-root-admin-permissions-to-run-userspace-perf-tool-perf-events-ar +echo 0 | sudo tee /proc/sys/kernel/perf_event_paranoid +# see http://stackoverflow.com/questions/21284906/perf-couldnt-record-kernel-reference-relocation-symbol +echo 0 | sudo tee /proc/sys/kernel/kptr_restrict + +# qps workers under perf appear to need a lot of mmap pages under certain scenarios and perf args in +# order to not lose perf events or time out +echo 4096 | sudo tee /proc/sys/kernel/perf_event_mlock_kb + +# Fetch scripts to generate flame graphs from perf data collected +# on benchmarks +git clone -v https://github.com/brendangregg/FlameGraph ~/FlameGraph + diff --git a/tools/internal_ci/linux/grpc_fuzzer_client.cfg b/tools/internal_ci/linux/grpc_fuzzer_client.cfg new file mode 100644 index 0000000000..b1bce02282 --- /dev/null +++ b/tools/internal_ci/linux/grpc_fuzzer_client.cfg @@ -0,0 +1,40 @@ +#!/bin/bash +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Config file for the internal CI (in protobuf text format) + +# Location of the continuous shell script in repository. 
+build_file: "grpc/tools/internal_ci/linux/grpc_fuzzer_client.sh" +timeout_mins: 1440 # 24 hours is the maximum allowed value +action { + define_artifacts { + regex: "git/grpc/fuzzer_output/**" + } +} diff --git a/src/python/grpcio_health_checking/grpc/__init__.py b/tools/internal_ci/linux/grpc_fuzzer_client.sh index fcc7048815..f9ff13d303 100644..100755 --- a/src/python/grpcio_health_checking/grpc/__init__.py +++ b/tools/internal_ci/linux/grpc_fuzzer_client.sh @@ -1,4 +1,5 @@ -# Copyright 2015, Google Inc. +#!/bin/bash +# Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -27,4 +28,14 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -__import__('pkg_resources').declare_namespace(__name__) +set -ex + +# change to grpc repo root +cd $(dirname $0)/../../.. + +git submodule update --init + +# download fuzzer docker image from dockerhub +export DOCKERHUB_ORGANIZATION=grpctesting +# runtime 23 * 60 mins +config=asan-trace-cmp runtime=82800 tools/jenkins/run_fuzzer.sh client_fuzzer diff --git a/tools/internal_ci/linux/grpc_master.cfg b/tools/internal_ci/linux/grpc_master.cfg index 1f81660078..8ce2ef11a2 100644 --- a/tools/internal_ci/linux/grpc_master.cfg +++ b/tools/internal_ci/linux/grpc_master.cfg @@ -31,4 +31,10 @@ # Config file for the internal CI (in protobuf text format) # Location of the continuous shell script in repository. -build_file: "grpc/tools/internal_ci/linux/run_tests.sh" +build_file: "grpc/tools/internal_ci/linux/grpc_master.sh" +timeout_mins: 60 +action { + define_artifacts { + regex: "**/sponge_log.xml" + } +} diff --git a/tools/internal_ci/linux/run_tests.sh b/tools/internal_ci/linux/grpc_master.sh index be477c1271..ea77d11305 100755 --- a/tools/internal_ci/linux/run_tests.sh +++ b/tools/internal_ci/linux/grpc_master.sh @@ -42,4 +42,12 @@ docker --version || true git submodule update --init -tools/run_tests/run_tests.py -l c --build_only +tools/run_tests/run_tests.py -l c -t -x sponge_log.xml || FAILED="true" + +# kill port_server.py to prevent the build from hanging +ps aux | grep port_server\\.py | awk '{print $2}' | xargs kill -9 + +if [ "$FAILED" != "" ] +then + exit 1 +fi diff --git a/src/python/grpcio_reflection/grpc/__init__.py b/tools/run_tests/artifacts/__init__.py index 70ac5edd48..100a624dc9 100644 --- a/src/python/grpcio_reflection/grpc/__init__.py +++ b/tools/run_tests/artifacts/__init__.py @@ -26,5 +26,3 @@ # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -__import__('pkg_resources').declare_namespace(__name__) diff --git a/tools/run_tests/artifact_targets.py b/tools/run_tests/artifacts/artifact_targets.py index 65d34e17e1..005d99790a 100644 --- a/tools/run_tests/artifact_targets.py +++ b/tools/run_tests/artifacts/artifact_targets.py @@ -35,7 +35,8 @@ import random import string import sys -import jobset +sys.path.insert(0, os.path.abspath('..')) +import python_utils.jobset as jobset def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={}, @@ -113,7 +114,7 @@ class PythonArtifact: environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE' return create_docker_jobspec(self.name, 'tools/dockerfile/grpc_artifact_python_manylinux_%s' % self.arch, - 'tools/run_tests/build_artifact_python.sh', + 'tools/run_tests/artifacts/build_artifact_python.sh', environ=environ, timeout_seconds=60*60) elif self.platform == 'windows': @@ -125,7 +126,7 @@ class PythonArtifact: # seed. We create a random temp-dir here dir = ''.join(random.choice(string.ascii_uppercase) for _ in range(10)) return create_jobspec(self.name, - ['tools\\run_tests\\build_artifact_python.bat', + ['tools\\run_tests\\artifacts\\build_artifact_python.bat', self.py_version, '32' if self.arch == 'x86' else '64', dir @@ -136,7 +137,7 @@ class PythonArtifact: environ['PYTHON'] = self.py_version environ['SKIP_PIP_INSTALL'] = 'TRUE' return create_jobspec(self.name, - ['tools/run_tests/build_artifact_python.sh'], + ['tools/run_tests/artifacts/build_artifact_python.sh'], environ=environ) def __str__(self): @@ -165,11 +166,11 @@ class RubyArtifact: environ['SETARCH_CMD'] = 'linux32' return create_docker_jobspec(self.name, 'tools/dockerfile/grpc_artifact_linux_%s' % self.arch, - 'tools/run_tests/build_artifact_ruby.sh', + 'tools/run_tests/artifacts/build_artifact_ruby.sh', environ=environ) else: return create_jobspec(self.name, - ['tools/run_tests/build_artifact_ruby.sh']) + ['tools/run_tests/artifacts/build_artifact_ruby.sh']) class CSharpExtArtifact: @@ -184,7 +185,7 @@ class CSharpExtArtifact: def pre_build_jobspecs(self): if self.platform == 'windows': return [create_jobspec('prebuild_%s' % self.name, - ['tools\\run_tests\\pre_build_c.bat'], + ['tools\\run_tests\\helper_scripts\\pre_build_c.bat'], shell=True, flake_retries=5, timeout_retries=2)] @@ -195,7 +196,7 @@ class CSharpExtArtifact: if self.platform == 'windows': msbuild_platform = 'Win32' if self.arch == 'x86' else self.arch return create_jobspec(self.name, - ['tools\\run_tests\\build_artifact_csharp.bat', + ['tools\\run_tests\\artifacts\\build_artifact_csharp.bat', 'vsprojects\\grpc_csharp_ext.sln', '/p:Configuration=Release', '/p:PlatformToolset=v120', @@ -210,14 +211,14 @@ class CSharpExtArtifact: if self.platform == 'linux': return create_docker_jobspec(self.name, 'tools/dockerfile/grpc_artifact_linux_%s' % self.arch, - 'tools/run_tests/build_artifact_csharp.sh', + 'tools/run_tests/artifacts/build_artifact_csharp.sh', environ=environ) else: archflag = _ARCH_FLAG_MAP[self.arch] environ['CFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG) environ['LDFLAGS'] += ' %s' % archflag return create_jobspec(self.name, - ['tools/run_tests/build_artifact_csharp.sh'], + ['tools/run_tests/artifacts/build_artifact_csharp.sh'], environ=environ) def __str__(self): @@ -245,7 +246,7 @@ class NodeExtArtifact: def build_jobspec(self): if self.platform == 'windows': return create_jobspec(self.name, - ['tools\\run_tests\\build_artifact_node.bat', + ['tools\\run_tests\\artifacts\\build_artifact_node.bat', self.gyp_arch], shell=True) else: @@ 
-253,10 +254,10 @@ class NodeExtArtifact: return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch), - 'tools/run_tests/build_artifact_node.sh {}'.format(self.gyp_arch)) + 'tools/run_tests/artifacts/build_artifact_node.sh {}'.format(self.gyp_arch)) else: return create_jobspec(self.name, - ['tools/run_tests/build_artifact_node.sh', + ['tools/run_tests/artifacts/build_artifact_node.sh', self.gyp_arch]) class PHPArtifact: @@ -276,10 +277,10 @@ class PHPArtifact: return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch), - 'tools/run_tests/build_artifact_php.sh') + 'tools/run_tests/artifacts/build_artifact_php.sh') else: return create_jobspec(self.name, - ['tools/run_tests/build_artifact_php.sh']) + ['tools/run_tests/artifacts/build_artifact_php.sh']) class ProtocArtifact: """Builds protoc and protoc-plugin artifacts""" @@ -306,18 +307,18 @@ class ProtocArtifact: if self.platform == 'linux': return create_docker_jobspec(self.name, 'tools/dockerfile/grpc_artifact_protoc', - 'tools/run_tests/build_artifact_protoc.sh', + 'tools/run_tests/artifacts/build_artifact_protoc.sh', environ=environ) else: environ['CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG return create_jobspec(self.name, - ['tools/run_tests/build_artifact_protoc.sh'], + ['tools/run_tests/artifacts/build_artifact_protoc.sh'], environ=environ) else: generator = 'Visual Studio 12 Win64' if self.arch == 'x64' else 'Visual Studio 12' vcplatform = 'x64' if self.arch == 'x64' else 'Win32' return create_jobspec(self.name, - ['tools\\run_tests\\build_artifact_protoc.bat'], + ['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'], environ={'generator': generator, 'Platform': vcplatform}) diff --git a/tools/run_tests/build_artifact_csharp.bat b/tools/run_tests/artifacts/build_artifact_csharp.bat index 24c8d485f9..24c8d485f9 100644 --- a/tools/run_tests/build_artifact_csharp.bat +++ b/tools/run_tests/artifacts/build_artifact_csharp.bat diff --git a/tools/run_tests/build_artifact_csharp.sh b/tools/run_tests/artifacts/build_artifact_csharp.sh index 7438713f5c..aed04b2745 100755 --- a/tools/run_tests/build_artifact_csharp.sh +++ b/tools/run_tests/artifacts/build_artifact_csharp.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. make grpc_csharp_ext diff --git a/tools/run_tests/build_artifact_node.bat b/tools/run_tests/artifacts/build_artifact_node.bat index 9175aa5c64..7439b76eb1 100644 --- a/tools/run_tests/build_artifact_node.bat +++ b/tools/run_tests/artifacts/build_artifact_node.bat @@ -27,7 +27,7 @@ @rem (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE @rem OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -set node_versions=0.12.0 1.0.0 1.1.0 2.0.0 3.0.0 4.0.0 5.0.0 6.0.0 +set node_versions=0.12.0 1.0.0 1.1.0 2.0.0 3.0.0 4.0.0 5.0.0 6.0.0 7.0.0 set electron_versions=1.0.0 1.1.0 1.2.0 1.3.0 1.4.0 diff --git a/tools/run_tests/build_artifact_node.sh b/tools/run_tests/artifacts/build_artifact_node.sh index c1ee71a4ce..02497a4db9 100755 --- a/tools/run_tests/build_artifact_node.sh +++ b/tools/run_tests/artifacts/build_artifact_node.sh @@ -34,7 +34,7 @@ source ~/.nvm/nvm.sh nvm use 4 set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. 
rm -rf build || true @@ -42,7 +42,7 @@ mkdir -p artifacts npm update -node_versions=( 0.12.0 1.0.0 1.1.0 2.0.0 3.0.0 4.0.0 5.0.0 6.0.0 ) +node_versions=( 0.12.0 1.0.0 1.1.0 2.0.0 3.0.0 4.0.0 5.0.0 6.0.0 7.0.0 ) electron_versions=( 1.0.0 1.1.0 1.2.0 1.3.0 1.4.0 ) diff --git a/tools/run_tests/build_artifact_php.sh b/tools/run_tests/artifacts/build_artifact_php.sh index 669447fa9a..c8d55860c1 100755 --- a/tools/run_tests/build_artifact_php.sh +++ b/tools/run_tests/artifacts/build_artifact_php.sh @@ -31,7 +31,7 @@ PHP_TARGET_ARCH=$1 set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. mkdir -p artifacts diff --git a/tools/run_tests/build_artifact_protoc.bat b/tools/run_tests/artifacts/build_artifact_protoc.bat index 3246a903d0..fd93318833 100644 --- a/tools/run_tests/build_artifact_protoc.bat +++ b/tools/run_tests/artifacts/build_artifact_protoc.bat @@ -30,15 +30,16 @@ mkdir artifacts setlocal -cd third_party/protobuf +cd third_party/protobuf/cmake -cd cmake -cmake -G "%generator%" -Dprotobuf_BUILD_TESTS=OFF || goto :error +mkdir build & cd build +mkdir solution & cd solution +cmake -G "%generator%" -Dprotobuf_BUILD_TESTS=OFF ../../.. || goto :error endlocal call vsprojects/build_plugins.bat || goto :error -xcopy /Y third_party\protobuf\cmake\Release\protoc.exe artifacts\ || goto :error +xcopy /Y third_party\protobuf\cmake\build\solution\Release\protoc.exe artifacts\ || goto :error xcopy /Y vsprojects\Release\*_plugin.exe artifacts\ || xcopy /Y vsprojects\x64\Release\*_plugin.exe artifacts\ || goto :error goto :EOF diff --git a/tools/run_tests/build_artifact_protoc.sh b/tools/run_tests/artifacts/build_artifact_protoc.sh index 161d3a84d6..26c2280eff 100755 --- a/tools/run_tests/build_artifact_protoc.sh +++ b/tools/run_tests/artifacts/build_artifact_protoc.sh @@ -33,7 +33,7 @@ source scl_source enable devtoolset-1.1 set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. make plugins diff --git a/tools/run_tests/build_artifact_python.bat b/tools/run_tests/artifacts/build_artifact_python.bat index 246713a6ce..246713a6ce 100644 --- a/tools/run_tests/build_artifact_python.bat +++ b/tools/run_tests/artifacts/build_artifact_python.bat diff --git a/tools/run_tests/build_artifact_python.sh b/tools/run_tests/artifacts/build_artifact_python.sh index 2a1d41fd68..5a5506029a 100755 --- a/tools/run_tests/build_artifact_python.sh +++ b/tools/run_tests/artifacts/build_artifact_python.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. export GRPC_PYTHON_USE_CUSTOM_BDIST=0 export GRPC_PYTHON_BUILD_WITH_CYTHON=1 diff --git a/tools/run_tests/build_artifact_ruby.sh b/tools/run_tests/artifacts/build_artifact_ruby.sh index 2d97b4068b..019efb01fd 100755 --- a/tools/run_tests/build_artifact_ruby.sh +++ b/tools/run_tests/artifacts/build_artifact_ruby.sh @@ -31,7 +31,7 @@ set -ex SYSTEM=`uname | cut -f 1 -d_` -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. set +ex [[ -s /etc/profile.d/rvm.sh ]] && . /etc/profile.d/rvm.sh [[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" diff --git a/tools/run_tests/build_package_node.sh b/tools/run_tests/artifacts/build_package_node.sh index a5636cf87a..8b5e8c0bc1 100755 --- a/tools/run_tests/build_package_node.sh +++ b/tools/run_tests/artifacts/build_package_node.sh @@ -33,7 +33,7 @@ source ~/.nvm/nvm.sh nvm use 4 set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. 
base=$(pwd) diff --git a/tools/run_tests/build_package_php.sh b/tools/run_tests/artifacts/build_package_php.sh index 56e3319ed9..42a8d9f8df 100755 --- a/tools/run_tests/build_package_php.sh +++ b/tools/run_tests/artifacts/build_package_php.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. mkdir -p artifacts/ cp -r $EXTERNAL_GIT_ROOT/architecture={x86,x64},language=php,platform={windows,linux,macos}/artifacts/* artifacts/ || true diff --git a/tools/run_tests/build_package_python.sh b/tools/run_tests/artifacts/build_package_python.sh index 2511a6ae46..4a1c15ceee 100755 --- a/tools/run_tests/build_package_python.sh +++ b/tools/run_tests/artifacts/build_package_python.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. mkdir -p artifacts/ diff --git a/tools/run_tests/build_package_ruby.sh b/tools/run_tests/artifacts/build_package_ruby.sh index 0a755bddb0..b4d20d8a4c 100755 --- a/tools/run_tests/build_package_ruby.sh +++ b/tools/run_tests/artifacts/build_package_ruby.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. base=$(pwd) diff --git a/tools/run_tests/distribtest_targets.py b/tools/run_tests/artifacts/distribtest_targets.py index a16daac4fe..a7535b3852 100644 --- a/tools/run_tests/distribtest_targets.py +++ b/tools/run_tests/artifacts/distribtest_targets.py @@ -30,7 +30,11 @@ """Definition of targets run distribution package tests.""" -import jobset +import os.path +import sys + +sys.path.insert(0, os.path.abspath('..')) +import python_utils.jobset as jobset def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={}, diff --git a/tools/run_tests/package_targets.py b/tools/run_tests/artifacts/package_targets.py index 673affeac0..d490f571c3 100644 --- a/tools/run_tests/package_targets.py +++ b/tools/run_tests/artifacts/package_targets.py @@ -30,7 +30,12 @@ """Definition of targets to build distribution packages.""" -import jobset +import os.path +import sys + +sys.path.insert(0, os.path.abspath('..')) +import python_utils.jobset as jobset + def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={}, flake_retries=0, timeout_retries=0): @@ -114,7 +119,7 @@ class NodePackage: return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_linux_x64', - 'tools/run_tests/build_package_node.sh') + 'tools/run_tests/artifacts/build_package_node.sh') class RubyPackage: @@ -131,7 +136,7 @@ class RubyPackage: return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_linux_x64', - 'tools/run_tests/build_package_ruby.sh') + 'tools/run_tests/artifacts/build_package_ruby.sh') class PythonPackage: @@ -148,7 +153,7 @@ class PythonPackage: return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_linux_x64', - 'tools/run_tests/build_package_python.sh') + 'tools/run_tests/artifacts/build_package_python.sh') class PHPPackage: @@ -165,7 +170,7 @@ class PHPPackage: return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_linux_x64', - 'tools/run_tests/build_package_php.sh') + 'tools/run_tests/artifacts/build_package_php.sh') def targets(): diff --git a/tools/run_tests/build_stats_schema.json b/tools/run_tests/build_stats/build_stats_schema.json index 021a349545..021a349545 100644 --- a/tools/run_tests/build_stats_schema.json +++ b/tools/run_tests/build_stats/build_stats_schema.json diff --git a/tools/run_tests/build_stats_schema_no_matrix.json b/tools/run_tests/build_stats/build_stats_schema_no_matrix.json index eeb067d7a5..eeb067d7a5 100644 
--- a/tools/run_tests/build_stats_schema_no_matrix.json +++ b/tools/run_tests/build_stats/build_stats_schema_no_matrix.json diff --git a/tools/run_tests/dockerize/build_docker_and_run_tests.sh b/tools/run_tests/dockerize/build_docker_and_run_tests.sh index c3219c533d..b68ac89121 100755 --- a/tools/run_tests/dockerize/build_docker_and_run_tests.sh +++ b/tools/run_tests/dockerize/build_docker_and_run_tests.sh @@ -77,8 +77,6 @@ docker run \ -v /tmp/ccache:/tmp/ccache \ -v /tmp/npm-cache:/tmp/npm-cache \ -v /tmp/xdg-cache-home:/tmp/xdg-cache-home \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v $(which docker):/bin/docker \ -w /var/local/git/grpc \ --name=$CONTAINER_NAME \ $DOCKER_IMAGE_NAME \ diff --git a/tools/run_tests/configs.json b/tools/run_tests/generated/configs.json index b0839ef026..b0839ef026 100644 --- a/tools/run_tests/configs.json +++ b/tools/run_tests/generated/configs.json diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 2e6877ccac..6ae269cc20 100644 --- a/tools/run_tests/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -2263,7 +2263,7 @@ }, { "deps": [ - "google_benchmark", + "benchmark", "gpr", "gpr_test_util", "grpc", @@ -2913,7 +2913,7 @@ }, { "deps": [ - "google_benchmark" + "benchmark" ], "headers": [], "is_filegroup": false, @@ -6207,30 +6207,30 @@ { "deps": [], "headers": [ - "third_party/google_benchmark/include/benchmark/benchmark.h", - "third_party/google_benchmark/include/benchmark/benchmark_api.h", - "third_party/google_benchmark/include/benchmark/macros.h", - "third_party/google_benchmark/include/benchmark/reporter.h", - "third_party/google_benchmark/src/arraysize.h", - "third_party/google_benchmark/src/benchmark_api_internal.h", - "third_party/google_benchmark/src/check.h", - "third_party/google_benchmark/src/colorprint.h", - "third_party/google_benchmark/src/commandlineflags.h", - "third_party/google_benchmark/src/complexity.h", - "third_party/google_benchmark/src/cycleclock.h", - "third_party/google_benchmark/src/internal_macros.h", - "third_party/google_benchmark/src/log.h", - "third_party/google_benchmark/src/mutex.h", - "third_party/google_benchmark/src/re.h", - "third_party/google_benchmark/src/sleep.h", - "third_party/google_benchmark/src/stat.h", - "third_party/google_benchmark/src/string_util.h", - "third_party/google_benchmark/src/sysinfo.h", - "third_party/google_benchmark/src/timers.h" - ], - "is_filegroup": false, - "language": "c++", - "name": "google_benchmark", + "third_party/benchmark/include/benchmark/benchmark.h", + "third_party/benchmark/include/benchmark/benchmark_api.h", + "third_party/benchmark/include/benchmark/macros.h", + "third_party/benchmark/include/benchmark/reporter.h", + "third_party/benchmark/src/arraysize.h", + "third_party/benchmark/src/benchmark_api_internal.h", + "third_party/benchmark/src/check.h", + "third_party/benchmark/src/colorprint.h", + "third_party/benchmark/src/commandlineflags.h", + "third_party/benchmark/src/complexity.h", + "third_party/benchmark/src/cycleclock.h", + "third_party/benchmark/src/internal_macros.h", + "third_party/benchmark/src/log.h", + "third_party/benchmark/src/mutex.h", + "third_party/benchmark/src/re.h", + "third_party/benchmark/src/sleep.h", + "third_party/benchmark/src/stat.h", + "third_party/benchmark/src/string_util.h", + "third_party/benchmark/src/sysinfo.h", + "third_party/benchmark/src/timers.h" + ], + "is_filegroup": false, + "language": "c++", + "name": "benchmark", "src": [], 
"third_party": false, "type": "lib" diff --git a/tools/run_tests/tests.json b/tools/run_tests/generated/tests.json index b76263b8b9..b76263b8b9 100644 --- a/tools/run_tests/tests.json +++ b/tools/run_tests/generated/tests.json diff --git a/tools/run_tests/build_csharp.sh b/tools/run_tests/helper_scripts/build_csharp.sh index 48ce11a10b..84c5b1c777 100755 --- a/tools/run_tests/build_csharp.sh +++ b/tools/run_tests/helper_scripts/build_csharp.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../../src/csharp +cd $(dirname $0)/../../../src/csharp # overriding NativeDependenciesConfigurationUnix is needed to make gcov code coverage work. xbuild /p:Configuration=$MSBUILD_CONFIG /p:NativeDependenciesConfigurationUnix=$CONFIG Grpc.sln diff --git a/tools/run_tests/build_csharp_coreclr.bat b/tools/run_tests/helper_scripts/build_csharp_coreclr.bat index b6e3ccbd2b..78e5f5998b 100644 --- a/tools/run_tests/build_csharp_coreclr.bat +++ b/tools/run_tests/helper_scripts/build_csharp_coreclr.bat @@ -29,7 +29,7 @@ setlocal -cd /d %~dp0\..\..\src\csharp +cd /d %~dp0\..\..\..\src\csharp dotnet restore . || goto :error diff --git a/tools/run_tests/build_csharp_coreclr.sh b/tools/run_tests/helper_scripts/build_csharp_coreclr.sh index 02cf0d39cb..dd5fd31c75 100755 --- a/tools/run_tests/build_csharp_coreclr.sh +++ b/tools/run_tests/helper_scripts/build_csharp_coreclr.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../../src/csharp +cd $(dirname $0)/../../../src/csharp # TODO(jtattermusch): introduce caching dotnet restore . diff --git a/tools/run_tests/build_node.bat b/tools/run_tests/helper_scripts/build_node.bat index 82e8208348..82e8208348 100644 --- a/tools/run_tests/build_node.bat +++ b/tools/run_tests/helper_scripts/build_node.bat diff --git a/tools/run_tests/build_node.sh b/tools/run_tests/helper_scripts/build_node.sh index d9292fd8aa..8a928bb762 100755 --- a/tools/run_tests/build_node.sh +++ b/tools/run_tests/helper_scripts/build_node.sh @@ -38,6 +38,6 @@ set -ex CONFIG=${CONFIG:-opt} # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. npm install --unsafe-perm --build-from-source diff --git a/tools/run_tests/build_node_electron.sh b/tools/run_tests/helper_scripts/build_node_electron.sh index 1c95c513b3..1c95c513b3 100755 --- a/tools/run_tests/build_node_electron.sh +++ b/tools/run_tests/helper_scripts/build_node_electron.sh diff --git a/tools/run_tests/build_php.sh b/tools/run_tests/helper_scripts/build_php.sh index 77a8abcfe7..acaaa23adf 100755 --- a/tools/run_tests/build_php.sh +++ b/tools/run_tests/helper_scripts/build_php.sh @@ -33,7 +33,7 @@ set -ex CONFIG=${CONFIG:-opt} # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. root=`pwd` export GRPC_LIB_SUBDIR=libs/$CONFIG diff --git a/tools/run_tests/build_python.sh b/tools/run_tests/helper_scripts/build_python.sh index fb884ad166..0e88e96765 100755 --- a/tools/run_tests/build_python.sh +++ b/tools/run_tests/helper_scripts/build_python.sh @@ -31,7 +31,7 @@ set -ex # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. 
########################## # Portability operations # @@ -171,8 +171,7 @@ pip_install_dir() { } $VENV_PYTHON -m pip install --upgrade pip -# TODO(https://github.com/pypa/setuptools/issues/709) get the latest setuptools -$VENV_PYTHON -m pip install setuptools==25.1.1 +$VENV_PYTHON -m pip install setuptools $VENV_PYTHON -m pip install cython pip_install_dir $ROOT $VENV_PYTHON $ROOT/tools/distrib/python/make_grpcio_tools.py diff --git a/tools/run_tests/build_python_msys2.sh b/tools/run_tests/helper_scripts/build_python_msys2.sh index 6e9d369018..6e9d369018 100644 --- a/tools/run_tests/build_python_msys2.sh +++ b/tools/run_tests/helper_scripts/build_python_msys2.sh diff --git a/tools/run_tests/build_ruby.sh b/tools/run_tests/helper_scripts/build_ruby.sh index 10343fce69..32638dede9 100755 --- a/tools/run_tests/build_ruby.sh +++ b/tools/run_tests/helper_scripts/build_ruby.sh @@ -34,7 +34,7 @@ set -ex export GRPC_CONFIG=${CONFIG:-opt} # change to grpc's ruby directory -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. rm -rf ./tmp rake compile diff --git a/tools/run_tests/post_tests_c.sh b/tools/run_tests/helper_scripts/post_tests_c.sh index 4409526dab..a83a59e23b 100755 --- a/tools/run_tests/post_tests_c.sh +++ b/tools/run_tests/helper_scripts/post_tests_c.sh @@ -32,7 +32,7 @@ set -ex if [ "$CONFIG" != "gcov" ] ; then exit ; fi -root=$(readlink -f $(dirname $0)/../..) +root=$(readlink -f $(dirname $0)/../../..) out=$root/reports/c_cxx_coverage tmp1=$(mktemp) tmp2=$(mktemp) diff --git a/tools/run_tests/post_tests_csharp.bat b/tools/run_tests/helper_scripts/post_tests_csharp.bat index 0d49a00b2a..2359f148ce 100644 --- a/tools/run_tests/post_tests_csharp.bat +++ b/tools/run_tests/helper_scripts/post_tests_csharp.bat @@ -36,7 +36,7 @@ if not "%CONFIG%" == "gcov" ( ) @rem enter src/csharp directory -cd /d %~dp0\..\..\src\csharp +cd /d %~dp0\..\..\..\src\csharp @rem Generate code coverage report @rem TODO(jtattermusch): currently the report list is hardcoded diff --git a/tools/run_tests/post_tests_csharp.sh b/tools/run_tests/helper_scripts/post_tests_csharp.sh index bb6f5c6e18..762c1f8827 100755 --- a/tools/run_tests/post_tests_csharp.sh +++ b/tools/run_tests/helper_scripts/post_tests_csharp.sh @@ -33,7 +33,7 @@ set -ex if [ "$CONFIG" != "gcov" ] ; then exit ; fi # change to gRPC repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. # Generate the csharp extension coverage report gcov objs/gcov/src/csharp/ext/*.o diff --git a/tools/run_tests/post_tests_php.sh b/tools/run_tests/helper_scripts/post_tests_php.sh index b4098066ea..23dc202322 100755 --- a/tools/run_tests/post_tests_php.sh +++ b/tools/run_tests/helper_scripts/post_tests_php.sh @@ -32,7 +32,7 @@ set -ex if [ "$CONFIG" != "gcov" ] ; then exit ; fi -root=$(readlink -f $(dirname $0)/../..) +root=$(readlink -f $(dirname $0)/../../..) out=$root/reports/php_ext_coverage tmp1=$(mktemp) tmp2=$(mktemp) diff --git a/tools/run_tests/post_tests_ruby.sh b/tools/run_tests/helper_scripts/post_tests_ruby.sh index 0877e44805..300edfe8a3 100755 --- a/tools/run_tests/post_tests_ruby.sh +++ b/tools/run_tests/helper_scripts/post_tests_ruby.sh @@ -32,7 +32,7 @@ set -ex if [ "$CONFIG" != "gcov" ] ; then exit ; fi -root=$(readlink -f $(dirname $0)/../..) +root=$(readlink -f $(dirname $0)/../../..) 
out=$root/reports/ruby_ext_coverage tmp1=$(mktemp) tmp2=$(mktemp) diff --git a/tools/run_tests/pre_build_c.bat b/tools/run_tests/helper_scripts/pre_build_c.bat index e4ab69384c..75b90f85b2 100644 --- a/tools/run_tests/pre_build_c.bat +++ b/tools/run_tests/helper_scripts/pre_build_c.bat @@ -32,7 +32,7 @@ setlocal @rem enter repo root -cd /d %~dp0\..\.. +cd /d %~dp0\..\..\.. @rem Location of nuget.exe set NUGET=C:\nuget\nuget.exe diff --git a/tools/run_tests/pre_build_csharp.bat b/tools/run_tests/helper_scripts/pre_build_csharp.bat index f15979a96b..139955d4da 100644 --- a/tools/run_tests/pre_build_csharp.bat +++ b/tools/run_tests/helper_scripts/pre_build_csharp.bat @@ -32,7 +32,7 @@ setlocal @rem enter repo root -cd /d %~dp0\..\.. +cd /d %~dp0\..\..\.. @rem Location of nuget.exe set NUGET=C:\nuget\nuget.exe diff --git a/tools/run_tests/pre_build_csharp.sh b/tools/run_tests/helper_scripts/pre_build_csharp.sh index ee678ddce5..1f808556f4 100755 --- a/tools/run_tests/pre_build_csharp.sh +++ b/tools/run_tests/helper_scripts/pre_build_csharp.sh @@ -31,7 +31,7 @@ set -ex # cd to gRPC csharp directory -cd $(dirname $0)/../../src/csharp +cd $(dirname $0)/../../../src/csharp root=`pwd` diff --git a/tools/run_tests/pre_build_node.bat b/tools/run_tests/helper_scripts/pre_build_node.bat index addb01a2a4..addb01a2a4 100644 --- a/tools/run_tests/pre_build_node.bat +++ b/tools/run_tests/helper_scripts/pre_build_node.bat diff --git a/tools/run_tests/pre_build_node.sh b/tools/run_tests/helper_scripts/pre_build_node.sh index e63be9da52..e63be9da52 100755 --- a/tools/run_tests/pre_build_node.sh +++ b/tools/run_tests/helper_scripts/pre_build_node.sh diff --git a/tools/run_tests/pre_build_node_electron.sh b/tools/run_tests/helper_scripts/pre_build_node_electron.sh index 95c56aa509..95c56aa509 100755 --- a/tools/run_tests/pre_build_node_electron.sh +++ b/tools/run_tests/helper_scripts/pre_build_node_electron.sh diff --git a/tools/run_tests/pre_build_ruby.sh b/tools/run_tests/helper_scripts/pre_build_ruby.sh index e7074c45c2..56b58df544 100755 --- a/tools/run_tests/pre_build_ruby.sh +++ b/tools/run_tests/helper_scripts/pre_build_ruby.sh @@ -34,6 +34,6 @@ set -ex export GRPC_CONFIG=${CONFIG:-opt} # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. bundle install diff --git a/tools/run_tests/run_lcov.sh b/tools/run_tests/helper_scripts/run_lcov.sh index 796a0b5ceb..bc7b44cd3e 100755 --- a/tools/run_tests/run_lcov.sh +++ b/tools/run_tests/helper_scripts/run_lcov.sh @@ -32,7 +32,7 @@ set -ex out=$(readlink -f ${1:-coverage}) -root=$(readlink -f $(dirname $0)/../..) +root=$(readlink -f $(dirname $0)/../../..) shift || true tmp=$(mktemp) cd $root diff --git a/tools/run_tests/run_node.bat b/tools/run_tests/helper_scripts/run_node.bat index 0987fbee55..0987fbee55 100644 --- a/tools/run_tests/run_node.bat +++ b/tools/run_tests/helper_scripts/run_node.bat diff --git a/tools/run_tests/run_node.sh b/tools/run_tests/helper_scripts/run_node.sh index 44f75645f5..0fafe9481a 100755 --- a/tools/run_tests/run_node.sh +++ b/tools/run_tests/helper_scripts/run_node.sh @@ -37,7 +37,7 @@ set -ex CONFIG=${CONFIG:-opt} # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. 
root=`pwd` diff --git a/tools/run_tests/run_node_electron.sh b/tools/run_tests/helper_scripts/run_node_electron.sh index 1999ffb0fa..1999ffb0fa 100755 --- a/tools/run_tests/run_node_electron.sh +++ b/tools/run_tests/helper_scripts/run_node_electron.sh diff --git a/tools/run_tests/run_python.sh b/tools/run_tests/helper_scripts/run_python.sh index 17e0186f2a..7be473428f 100755 --- a/tools/run_tests/run_python.sh +++ b/tools/run_tests/helper_scripts/run_python.sh @@ -31,7 +31,7 @@ set -ex # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. PYTHON=`realpath -s "${1:-py27/bin/python}"` diff --git a/tools/run_tests/run_ruby.sh b/tools/run_tests/helper_scripts/run_ruby.sh index 73a84ac361..ab153b7e25 100755 --- a/tools/run_tests/run_ruby.sh +++ b/tools/run_tests/helper_scripts/run_ruby.sh @@ -31,6 +31,6 @@ set -ex # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. rake diff --git a/tools/run_tests/run_tests_in_workspace.sh b/tools/run_tests/helper_scripts/run_tests_in_workspace.sh index 9c6c5b76e0..002c8d6de2 100755 --- a/tools/run_tests/run_tests_in_workspace.sh +++ b/tools/run_tests/helper_scripts/run_tests_in_workspace.sh @@ -34,7 +34,7 @@ # newly created workspace) set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. export repo_root=$(pwd) rm -rf "${WORKSPACE_NAME}" diff --git a/tools/run_tests/interop_html_report.template b/tools/run_tests/interop/interop_html_report.template index 46cce426b7..46cce426b7 100644 --- a/tools/run_tests/interop_html_report.template +++ b/tools/run_tests/interop/interop_html_report.template diff --git a/tools/run_tests/performance/process_local_perf_flamegraphs.sh b/tools/run_tests/performance/process_local_perf_flamegraphs.sh new file mode 100755 index 0000000000..d15610f137 --- /dev/null +++ b/tools/run_tests/performance/process_local_perf_flamegraphs.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +mkdir -p $OUTPUT_DIR + +PERF_DATA_FILE=${PERF_BASE_NAME}-perf.data +PERF_SCRIPT_OUTPUT=${PERF_BASE_NAME}-out.perf + +# Generate Flame graphs +echo "running perf script on $PERF_DATA_FILE" +perf script -i $PERF_DATA_FILE > $PERF_SCRIPT_OUTPUT + +~/FlameGraph/stackcollapse-perf.pl $PERF_SCRIPT_OUTPUT | ~/FlameGraph/flamegraph.pl > ${OUTPUT_DIR}/${OUTPUT_FILENAME}.svg diff --git a/tools/run_tests/performance/process_remote_perf_flamegraphs.sh b/tools/run_tests/performance/process_remote_perf_flamegraphs.sh new file mode 100755 index 0000000000..cc075354cc --- /dev/null +++ b/tools/run_tests/performance/process_remote_perf_flamegraphs.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +mkdir -p $OUTPUT_DIR + +PERF_DATA_FILE=${PERF_BASE_NAME}-perf.data +PERF_SCRIPT_OUTPUT=${PERF_BASE_NAME}-out.perf + +# Generate Flame graphs +echo "running perf script on $USER_AT_HOST with perf.data" +ssh $USER_AT_HOST "cd ~/performance_workspace/grpc && perf script -i $PERF_DATA_FILE | gzip > ${PERF_SCRIPT_OUTPUT}.gz" + +scp $USER_AT_HOST:~/performance_workspace/grpc/$PERF_SCRIPT_OUTPUT.gz . + +gzip -d -f $PERF_SCRIPT_OUTPUT.gz + +~/FlameGraph/stackcollapse-perf.pl --kernel $PERF_SCRIPT_OUTPUT | ~/FlameGraph/flamegraph.pl --color=java --hash > ${OUTPUT_DIR}/${OUTPUT_FILENAME}.svg diff --git a/tools/distrib/python/grpcio_tools/grpc/__init__.py b/tools/run_tests/python_utils/__init__.py index 70ac5edd48..100a624dc9 100644 --- a/tools/distrib/python/grpcio_tools/grpc/__init__.py +++ b/tools/run_tests/python_utils/__init__.py @@ -26,5 +26,3 @@ # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
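The two new performance scripts above share one pipeline: expand the collected perf.data with perf script, fold the stacks with FlameGraph's stackcollapse-perf.pl, and render an SVG with flamegraph.pl (the remote variant just ships the perf script output over ssh/scp first). A hedged Python sketch of the local pipeline; the input and output file names are illustrative, while the ~/FlameGraph checkout matches the worker init script:

import os
import subprocess

perf_data = 'qps_worker-perf.data'        # illustrative input name
flamegraph_dir = os.path.expanduser('~/FlameGraph')

# perf script expands perf.data into readable stack samples.
stacks = subprocess.check_output(['perf', 'script', '-i', perf_data])

# Fold the stacks, then render the folded output as an SVG flame graph.
fold = subprocess.Popen([os.path.join(flamegraph_dir, 'stackcollapse-perf.pl')],
                        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
folded, _ = fold.communicate(stacks)
render = subprocess.Popen([os.path.join(flamegraph_dir, 'flamegraph.pl')],
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE)
svg, _ = render.communicate(folded)

with open('out.svg', 'wb') as f:
    f.write(svg)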
- -__import__('pkg_resources').declare_namespace(__name__) diff --git a/tools/run_tests/antagonist.py b/tools/run_tests/python_utils/antagonist.py index 857addfb38..857addfb38 100755 --- a/tools/run_tests/antagonist.py +++ b/tools/run_tests/python_utils/antagonist.py diff --git a/tools/run_tests/dockerjob.py b/tools/run_tests/python_utils/dockerjob.py index 4a7e61b3c4..0869c5cee9 100755 --- a/tools/run_tests/dockerjob.py +++ b/tools/run_tests/python_utils/dockerjob.py @@ -31,13 +31,14 @@ from __future__ import print_function -import jobset import tempfile import time import uuid import os import subprocess +import jobset + _DEVNULL = open(os.devnull, 'w') diff --git a/tools/run_tests/filter_pull_request_tests.py b/tools/run_tests/python_utils/filter_pull_request_tests.py index ca1d6d4eb5..ca1d6d4eb5 100644 --- a/tools/run_tests/filter_pull_request_tests.py +++ b/tools/run_tests/python_utils/filter_pull_request_tests.py diff --git a/tools/run_tests/jobset.py b/tools/run_tests/python_utils/jobset.py index 2acc7971f6..7b2c62d1a2 100755 --- a/tools/run_tests/jobset.py +++ b/tools/run_tests/python_utils/jobset.py @@ -139,16 +139,16 @@ def message(tag, msg, explanatory_text=None, do_newline=False): if explanatory_text: print(explanatory_text) print('%s: %s' % (tag, msg)) - return - sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % ( - _BEGINNING_OF_LINE, - _CLEAR_LINE, - '\n%s' % explanatory_text if explanatory_text is not None else '', - _COLORS[_TAG_COLOR[tag]][1], - _COLORS[_TAG_COLOR[tag]][0], - tag, - msg, - '\n' if do_newline or explanatory_text is not None else '')) + else: + sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % ( + _BEGINNING_OF_LINE, + _CLEAR_LINE, + '\n%s' % explanatory_text if explanatory_text is not None else '', + _COLORS[_TAG_COLOR[tag]][1], + _COLORS[_TAG_COLOR[tag]][0], + tag, + msg, + '\n' if do_newline or explanatory_text is not None else '')) sys.stdout.flush() except: pass @@ -219,7 +219,8 @@ class JobResult(object): class Job(object): """Manages one job.""" - def __init__(self, spec, newline_on_success, travis, add_env): + def __init__(self, spec, newline_on_success, travis, add_env, + quiet_success=False): self._spec = spec self._newline_on_success = newline_on_success self._travis = travis @@ -227,7 +228,9 @@ class Job(object): self._retries = 0 self._timeout_retries = 0 self._suppress_failure_message = False - message('START', spec.shortname, do_newline=self._travis) + self._quiet_success = quiet_success + if not self._quiet_success: + message('START', spec.shortname, do_newline=self._travis) self.result = JobResult() self.start() @@ -302,10 +305,11 @@ class Job(object): if real > 0.5: cores = (user + sys) / real measurement = '; cpu_cost=%.01f; estimated=%.01f' % (cores, self._spec.cpu_cost) - message('PASSED', '%s [time=%.1fsec; retries=%d:%d%s]' % ( - self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement), - stdout() if self._spec.verbose_success else None, - do_newline=self._newline_on_success or self._travis) + if not self._quiet_success: + message('PASSED', '%s [time=%.1fsec; retries=%d:%d%s]' % ( + self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement), + stdout() if self._spec.verbose_success else None, + do_newline=self._newline_on_success or self._travis) self.result.state = 'PASSED' elif (self._state == _RUNNING and self._spec.timeout_seconds is not None and @@ -341,7 +345,7 @@ class Jobset(object): """Manages one run of jobs.""" def __init__(self, check_cancelled, maxjobs, 
newline_on_success, travis, - stop_on_failure, add_env): + stop_on_failure, add_env, quiet_success): self._running = set() self._check_cancelled = check_cancelled self._cancelled = False @@ -352,6 +356,7 @@ class Jobset(object): self._travis = travis self._stop_on_failure = stop_on_failure self._add_env = add_env + self._quiet_success = quiet_success self.resultset = {} self._remaining = None self._start_time = time.time() @@ -380,7 +385,8 @@ class Jobset(object): job = Job(spec, self._newline_on_success, self._travis, - self._add_env) + self._add_env, + self._quiet_success) self._running.add(job) if job.GetSpec().shortname not in self.resultset: self.resultset[job.GetSpec().shortname] = [] @@ -403,10 +409,11 @@ class Jobset(object): break for job in dead: self._completed += 1 - self.resultset[job.GetSpec().shortname].append(job.result) + if not self._quiet_success or job.result.state != 'PASSED': + self.resultset[job.GetSpec().shortname].append(job.result) self._running.remove(job) if dead: return - if (not self._travis): + if not self._travis and platform_string() != 'windows': rstr = '' if self._remaining is None else '%d queued, ' % self._remaining if self._remaining is not None and self._completed > 0: now = time.time() @@ -463,7 +470,8 @@ def run(cmdlines, infinite_runs=False, stop_on_failure=False, add_env={}, - skip_jobs=False): + skip_jobs=False, + quiet_success=False): if skip_jobs: results = {} skipped_job_result = JobResult() @@ -474,7 +482,8 @@ def run(cmdlines, return results js = Jobset(check_cancelled, maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS, - newline_on_success, travis, stop_on_failure, add_env) + newline_on_success, travis, stop_on_failure, add_env, + quiet_success) for cmdline, remaining in tag_remaining(cmdlines): if not js.start(cmdline): break diff --git a/tools/run_tests/port_server.py b/tools/run_tests/python_utils/port_server.py index e9b3f7ff79..e9b3f7ff79 100755 --- a/tools/run_tests/port_server.py +++ b/tools/run_tests/python_utils/port_server.py diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/python_utils/report_utils.py index 90055e3530..352cf7abe7 100644 --- a/tools/run_tests/report_utils.py +++ b/tools/run_tests/python_utils/report_utils.py @@ -84,7 +84,7 @@ def render_interop_html_report( client_langs, server_langs, test_cases, auth_test_cases, http2_cases, resultset, num_failures, cloud_to_prod, prod_servers, http2_interop): """Generate HTML report for interop tests.""" - template_file = 'tools/run_tests/interop_html_report.template' + template_file = 'tools/run_tests/interop/interop_html_report.template' try: mytemplate = Template(filename=template_file, format_exceptions=True) except NameError: @@ -122,3 +122,10 @@ def render_interop_html_report( except: print(exceptions.text_error_template().render()) raise + +def render_perf_profiling_results(output_filepath, profile_names): + with open(output_filepath, 'w') as output_file: + output_file.write('<ul>\n') + for name in profile_names: + output_file.write('<li><a href=%s>%s</a></li>\n' % (name, name)) + output_file.write('</ul>\n') diff --git a/tools/run_tests/watch_dirs.py b/tools/run_tests/python_utils/watch_dirs.py index 21ef23e158..21ef23e158 100755 --- a/tools/run_tests/watch_dirs.py +++ b/tools/run_tests/python_utils/watch_dirs.py diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py index 83cfc429f9..c14f18af81 100755 --- a/tools/run_tests/run_interop_tests.py +++ b/tools/run_tests/run_interop_tests.py @@ -34,20 +34,21 @@ from 
__future__ import print_function import argparse import atexit -import dockerjob import itertools -import jobset import json import multiprocessing import os import re -import report_utils import subprocess import sys import tempfile import time import uuid +import python_utils.dockerjob as dockerjob +import python_utils.jobset as jobset +import python_utils.report_utils as report_utils + # Docker doesn't clean up after itself, so we do it on exit. atexit.register(lambda: subprocess.call(['stty', 'echo'])) diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py index 1d0c98fb69..b7b742d7af 100755 --- a/tools/run_tests/run_performance_tests.py +++ b/tools/run_tests/run_performance_tests.py @@ -35,14 +35,11 @@ from __future__ import print_function import argparse import collections import itertools -import jobset import json import multiprocessing import os -import performance.scenario_config as scenario_config import pipes import re -import report_utils import subprocess import sys import tempfile @@ -50,6 +47,10 @@ import time import traceback import uuid +import performance.scenario_config as scenario_config +import python_utils.jobset as jobset +import python_utils.report_utils as report_utils + _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..')) os.chdir(_ROOT) @@ -57,15 +58,18 @@ os.chdir(_ROOT) _REMOTE_HOST_USERNAME = 'jenkins' +_PERF_REPORT_OUTPUT_DIR = 'perf_reports' + class QpsWorkerJob: """Encapsulates a qps worker server job.""" - def __init__(self, spec, language, host_and_port): + def __init__(self, spec, language, host_and_port, perf_file_base_name=None): self._spec = spec self.language = language self.host_and_port = host_and_port self._job = None + self.perf_file_base_name = perf_file_base_name def start(self): self._job = jobset.Job(self._spec, newline_on_success=True, travis=True, add_env={}) @@ -80,24 +84,32 @@ class QpsWorkerJob: self._job = None -def create_qpsworker_job(language, shortname=None, - port=10000, remote_host=None): - cmdline = language.worker_cmdline() + ['--driver_port=%s' % port] +def create_qpsworker_job(language, shortname=None, port=10000, remote_host=None, perf_cmd=None): + cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port]) + if remote_host: - user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host) - cmdline = ['ssh', - str(user_at_host), - 'cd ~/performance_workspace/grpc/ && %s' % ' '.join(cmdline)] host_and_port='%s:%s' % (remote_host, port) else: host_and_port='localhost:%s' % port + perf_file_base_name = None + if perf_cmd: + perf_file_base_name = '%s-%s' % (host_and_port, shortname) + # specify -o output file so perf.data gets collected when worker stopped + cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name] + cmdline + + if remote_host: + user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host) + ssh_cmd = ['ssh'] + ssh_cmd.extend([str(user_at_host), 'cd ~/performance_workspace/grpc/ && %s' % ' '.join(cmdline)]) + cmdline = ssh_cmd + jobspec = jobset.JobSpec( cmdline=cmdline, shortname=shortname, timeout_seconds=5*60, # workers get restarted after each scenario verbose_success=True) - return QpsWorkerJob(jobspec, language, host_and_port) + return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name) def create_scenario_jobspec(scenario_json, workers, remote_host=None, @@ -259,7 +271,7 @@ def build_on_remote_hosts(hosts, languages=scenario_config.LANGUAGES.keys(), bui sys.exit(1) -def create_qpsworkers(languages, 
worker_hosts): +def create_qpsworkers(languages, worker_hosts, perf_cmd=None): """Creates QPS workers (but does not start them).""" if not worker_hosts: # run two workers locally (for each language) @@ -275,11 +287,32 @@ def create_qpsworkers(languages, worker_hosts): shortname= 'qps_worker_%s_%s' % (language, worker_idx), port=worker[1] + language.worker_port_offset(), - remote_host=worker[0]) + remote_host=worker[0], + perf_cmd=perf_cmd) for language in languages for worker_idx, worker in enumerate(workers)] +def perf_report_processor_job(worker_host, perf_base_name, output_filename): + print('Creating perf report collection job for %s' % worker_host) + cmd = '' + if worker_host != 'localhost': + user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host) + cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\ + tools/run_tests/performance/process_remote_perf_flamegraphs.sh" \ + % (user_at_host, output_filename, _PERF_REPORT_OUTPUT_DIR, perf_base_name) + else: + cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\ + tools/run_tests/performance/process_local_perf_flamegraphs.sh" \ + % (output_filename, _PERF_REPORT_OUTPUT_DIR, perf_base_name) + + return jobset.JobSpec(cmdline=cmd, + timeout_seconds=3*60, + shell=True, + verbose_success=True, + shortname='process perf report') + + Scenario = collections.namedtuple('Scenario', 'jobspec workers name') @@ -372,6 +405,31 @@ def finish_qps_workers(jobs): print('All QPS workers finished.') return num_killed +profile_output_files = [] + +# Collect perf text reports and flamegraphs if perf_cmd was used +# Note the base names of perf text reports are used when creating and processing +# perf data. The scenario name uniquifies the output name in the final +# perf reports directory. +# Also, the perf profiles need to be fetched and processed after each scenario +# in order to avoid clobbering the output files. +def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name): + perf_report_jobs = [] + global profile_output_files + for host_and_port in hosts_and_base_names: + perf_base_name = hosts_and_base_names[host_and_port] + output_filename = '%s-%s' % (scenario_name, perf_base_name) + # from the base filename, create .svg output filename + host = host_and_port.split(':')[0] + profile_output_files.append('%s.svg' % output_filename) + perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename)) + + jobset.message('START', 'Collecting perf reports from qps workers', do_newline=True) + failures, _ = jobset.run(perf_report_jobs, newline_on_success=True, maxjobs=1) + jobset.message('END', 'Collecting perf reports from qps workers', do_newline=True) + return failures + + argp = argparse.ArgumentParser(description='Run performance tests.') argp.add_argument('-l', '--language', choices=['all'] + sorted(scenario_config.LANGUAGES.keys()), @@ -405,6 +463,33 @@ argp.add_argument('--netperf', help='Run netperf benchmark as one of the scenarios.') argp.add_argument('-x', '--xml_report', default='report.xml', type=str, help='Name of XML report file to generate.') +argp.add_argument('--perf_args', + help=('Example usage: "--perf_args=record -F 99 -g". ' + 'Wrap QPS workers in a perf command ' + 'with the arguments to perf specified here. ' + '".svg" flame graph profiles will be ' + 'created for each QPS worker on each scenario. ' + 'Files will be output to the "<repo_root>/perf_reports" ' + 'directory. Output files from running the worker ' + 'under perf are saved in the repo root where it is run. 
' + 'Note that the perf "-g" flag is necessary for ' + 'flame graph generation to work (assuming the binary ' + 'being profiled uses frame pointers, check out ' + '"--call-graph dwarf" option using libunwind otherwise.) ' + 'Also note that the entire "--perf_args=<arg(s)>" must ' + 'be wrapped in quotes as in the example usage. ' + 'If "--perf_args" is unspecified, "perf" will ' + 'not be used at all. ' + 'See http://www.brendangregg.com/perf.html ' + 'for more general perf examples.')) +argp.add_argument('--skip_generate_flamegraphs', + default=False, + action='store_const', + const=True, + help=('Turn flame graph generation off. ' + 'May be useful if "perf_args" arguments do not make sense for ' + 'generating flamegraphs (e.g., "--perf_args=stat ...")')) + args = argp.parse_args() @@ -435,7 +520,13 @@ if not args.remote_driver_host: if not args.dry_run: build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local) -qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host) +perf_cmd = None +if args.perf_args: + # Expect /usr/bin/perf to be installed here, as is usual + perf_cmd = ['/usr/bin/perf'] + perf_cmd.extend(re.split('\s+', args.perf_args)) + +qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host, perf_cmd=perf_cmd) # get list of worker addresses for each language. workers_by_lang = dict([(str(language), []) for language in languages]) @@ -457,16 +548,20 @@ if not scenarios: total_scenario_failures = 0 qps_workers_killed = 0 merged_resultset = {} +perf_report_failures = 0 + for scenario in scenarios: if args.dry_run: print(scenario.name) else: + scenario_failures = 0 try: for worker in scenario.workers: worker.start() - scenario_failures, resultset = jobset.run([scenario.jobspec, - create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host)], - newline_on_success=True, maxjobs=1) + jobs = [scenario.jobspec] + if scenario.workers: + jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host)) + scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1) total_scenario_failures += scenario_failures merged_resultset = dict(itertools.chain(merged_resultset.iteritems(), resultset.iteritems())) @@ -474,10 +569,27 @@ for scenario in scenarios: # Consider qps workers that need to be killed as failures qps_workers_killed += finish_qps_workers(scenario.workers) + if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs: + workers_and_base_names = {} + for worker in scenario.workers: + if not worker.perf_file_base_name: + raise Exception('using perf but perf report filename is unspecified') + workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name + perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name) -report_utils.render_junit_xml_report(merged_resultset, args.xml_report, - suite_name='benchmarks') + +# Still write the index.html even if some scenarios failed. 
+# 'profile_output_files' will only have names for scenarios that passed +if perf_cmd and not args.skip_generate_flamegraphs: + # write the index file to the output dir, with all profiles from all scenarios/workers + report_utils.render_perf_profiling_results('%s/index.html' % _PERF_REPORT_OUTPUT_DIR, profile_output_files) if total_scenario_failures > 0 or qps_workers_killed > 0: - print ("%s scenarios failed and %s qps worker jobs killed" % (total_scenario_failures, qps_workers_killed)) + print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed)) + sys.exit(1) + +report_utils.render_junit_xml_report(merged_resultset, args.xml_report, + suite_name='benchmarks') +if perf_report_failures > 0: + print('%s perf profile collection jobs failed' % perf_report_failures) sys.exit(1) diff --git a/tools/run_tests/run_stress_tests.py b/tools/run_tests/run_stress_tests.py index de4a22877c..a94a615b88 100755 --- a/tools/run_tests/run_stress_tests.py +++ b/tools/run_tests/run_stress_tests.py @@ -33,9 +33,7 @@ from __future__ import print_function import argparse import atexit -import dockerjob import itertools -import jobset import json import multiprocessing import os @@ -46,6 +44,9 @@ import tempfile import time import uuid +import python_utils.dockerjob as dockerjob +import python_utils.jobset as jobset + # Docker doesn't clean up after itself, so we do it on exit. atexit.register(lambda: subprocess.call(['stty', 'echo'])) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 5bbc699f64..ab719a6727 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -54,9 +54,9 @@ import time from six.moves import urllib import uuid -import jobset -import report_utils -import watch_dirs +import python_utils.jobset as jobset +import python_utils.report_utils as report_utils +import python_utils.watch_dirs as watch_dirs _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..')) @@ -116,7 +116,7 @@ class Config(object): def get_c_tests(travis, test_lang) : out = [] platforms_str = 'ci_platforms' if travis else 'platforms' - with open('tools/run_tests/tests.json') as f: + with open('tools/run_tests/generated/tests.json') as f: js = json.load(f) return [tgt for tgt in js @@ -300,7 +300,7 @@ class CLanguage(object): def pre_build_steps(self): if self.platform == 'windows': - return [['tools\\run_tests\\pre_build_c.bat']] + return [['tools\\run_tests\\helper_scripts\\pre_build_c.bat']] else: return [] @@ -311,7 +311,7 @@ class CLanguage(object): if self.platform == 'windows': return [] else: - return [['tools/run_tests/post_tests_c.sh']] + return [['tools/run_tests/helper_scripts/post_tests_c.sh']] def makefile_name(self): return 'Makefile' @@ -389,24 +389,24 @@ class NodeLanguage(object): def test_specs(self): if self.platform == 'windows': - return [self.config.job_spec(['tools\\run_tests\\run_node.bat'], None)] + return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])] else: run_script = 'run_node' if self.runtime == 'electron': run_script += '_electron' - return [self.config.job_spec(['tools/run_tests/{}.sh'.format(run_script), + return [self.config.job_spec(['tools/run_tests/helper_scripts/{}.sh'.format(run_script), self.node_version], None, environ=_FORCE_ENVIRON_FOR_WRAPPERS)] def pre_build_steps(self): if self.platform == 'windows': - return [['tools\\run_tests\\pre_build_node.bat']] + return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']] else: build_script = 
'pre_build_node' if self.runtime == 'electron': build_script += '_electron' - return [['tools/run_tests/{}.sh'.format(build_script), self.node_version]] + return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script), self.node_version]] def make_targets(self): return [] @@ -416,14 +416,14 @@ class NodeLanguage(object): def build_steps(self): if self.platform == 'windows': - return [['tools\\run_tests\\build_node.bat']] + return [['tools\\run_tests\\helper_scripts\\build_node.bat']] else: build_script = 'build_node' if self.runtime == 'electron': build_script += '_electron' # building for electron requires a patch version self.node_version += '.0' - return [['tools/run_tests/{}.sh'.format(build_script), self.node_version]] + return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script), self.node_version]] def post_tests_steps(self): return [] @@ -446,7 +446,7 @@ class PhpLanguage(object): _check_compiler(self.args.compiler, ['default']) def test_specs(self): - return [self.config.job_spec(['src/php/bin/run_tests.sh'], None, + return [self.config.job_spec(['src/php/bin/run_tests.sh'], environ=_FORCE_ENVIRON_FOR_WRAPPERS)] def pre_build_steps(self): @@ -459,10 +459,10 @@ class PhpLanguage(object): return [] def build_steps(self): - return [['tools/run_tests/build_php.sh']] + return [['tools/run_tests/helper_scripts/build_php.sh']] def post_tests_steps(self): - return [['tools/run_tests/post_tests_php.sh']] + return [['tools/run_tests/helper_scripts/post_tests_php.sh']] def makefile_name(self): return 'Makefile' @@ -482,7 +482,7 @@ class Php7Language(object): _check_compiler(self.args.compiler, ['default']) def test_specs(self): - return [self.config.job_spec(['src/php/bin/run_tests.sh'], None, + return [self.config.job_spec(['src/php/bin/run_tests.sh'], environ=_FORCE_ENVIRON_FOR_WRAPPERS)] def pre_build_steps(self): @@ -495,10 +495,10 @@ class Php7Language(object): return [] def build_steps(self): - return [['tools/run_tests/build_php.sh']] + return [['tools/run_tests/helper_scripts/build_php.sh']] def post_tests_steps(self): - return [['tools/run_tests/post_tests_php.sh']] + return [['tools/run_tests/helper_scripts/post_tests_php.sh']] def makefile_name(self): return 'Makefile' @@ -530,7 +530,7 @@ class PythonLanguage(object): config.run, timeout_seconds=5*60, environ=dict(list(environment.items()) + - [('GRPC_PYTHON_TESTRUNNER_FILTER', suite_name)]), + [('GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]), shortname='%s.test.%s' % (config.name, suite_name),) for suite_name in tests_json for config in self.pythons] @@ -567,18 +567,18 @@ class PythonLanguage(object): if os.name == 'nt': shell = ['bash'] - builder = [os.path.abspath('tools/run_tests/build_python_msys2.sh')] + builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')] builder_prefix_arguments = ['MINGW{}'.format(bits)] venv_relative_python = ['Scripts/python.exe'] toolchain = ['mingw32'] else: shell = [] - builder = [os.path.abspath('tools/run_tests/build_python.sh')] + builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')] builder_prefix_arguments = [] venv_relative_python = ['bin/python'] toolchain = ['unix'] - runner = [os.path.abspath('tools/run_tests/run_python.sh')] + runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')] config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments, venv_relative_python, toolchain, runner) python27_config = _python_config_generator(name='py27', major='2', @@ -630,12 +630,12 @@ class 
RubyLanguage(object): _check_compiler(self.args.compiler, ['default']) def test_specs(self): - return [self.config.job_spec(['tools/run_tests/run_ruby.sh'], + return [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'], timeout_seconds=10*60, environ=_FORCE_ENVIRON_FOR_WRAPPERS)] def pre_build_steps(self): - return [['tools/run_tests/pre_build_ruby.sh']] + return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']] def make_targets(self): return [] @@ -644,10 +644,10 @@ class RubyLanguage(object): return [] def build_steps(self): - return [['tools/run_tests/build_ruby.sh']] + return [['tools/run_tests/helper_scripts/build_ruby.sh']] def post_tests_steps(self): - return [['tools/run_tests/post_tests_ruby.sh']] + return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']] def makefile_name(self): return 'Makefile' @@ -721,7 +721,6 @@ class CSharpLanguage(object): for test in tests_by_assembly[assembly]: cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args specs.append(self.config.job_spec(cmdline, - None, shortname='csharp.%s' % test, environ=_FORCE_ENVIRON_FOR_WRAPPERS)) else: @@ -739,7 +738,6 @@ class CSharpLanguage(object): # to prevent problems with registering the profiler. run_exclusive = 1000000 specs.append(self.config.job_spec(cmdline, - None, shortname='csharp.coverage.%s' % assembly, cpu_cost=run_exclusive, environ=_FORCE_ENVIRON_FOR_WRAPPERS)) @@ -747,9 +745,9 @@ class CSharpLanguage(object): def pre_build_steps(self): if self.platform == 'windows': - return [['tools\\run_tests\\pre_build_csharp.bat']] + return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat']] else: - return [['tools/run_tests/pre_build_csharp.sh']] + return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']] def make_targets(self): return ['grpc_csharp_ext'] @@ -760,22 +758,22 @@ class CSharpLanguage(object): def build_steps(self): if self.args.compiler == 'coreclr': if self.platform == 'windows': - return [['tools\\run_tests\\build_csharp_coreclr.bat']] + return [['tools\\run_tests\\helper_scripts\\build_csharp_coreclr.bat']] else: - return [['tools/run_tests/build_csharp_coreclr.sh']] + return [['tools/run_tests/helper_scripts/build_csharp_coreclr.sh']] else: if self.platform == 'windows': return [[_windows_build_bat(self.args.compiler), 'src/csharp/Grpc.sln', '/p:Configuration=%s' % _MSBUILD_CONFIG[self.config.build_config]]] else: - return [['tools/run_tests/build_csharp.sh']] + return [['tools/run_tests/helper_scripts/build_csharp.sh']] def post_tests_steps(self): if self.platform == 'windows': - return [['tools\\run_tests\\post_tests_csharp.bat']] + return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']] else: - return [['tools/run_tests/post_tests_csharp.sh']] + return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']] def makefile_name(self): return 'Makefile' @@ -798,7 +796,7 @@ class ObjCLanguage(object): def test_specs(self): return [ self.config.job_spec(['src/objective-c/tests/run_tests.sh'], - timeout_seconds=None, + timeout_seconds=60*60, shortname='objc-tests', environ=_FORCE_ENVIRON_FOR_WRAPPERS), self.config.job_spec(['src/objective-c/tests/build_example_test.sh'], @@ -842,8 +840,12 @@ class Sanity(object): def test_specs(self): import yaml with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f: + environ={'TEST': 'true'} + if _is_use_docker_child(): + environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true' return [self.config.job_spec(cmd['script'].split(), - timeout_seconds=None, environ={'TEST': 'true'}, + 
timeout_seconds=30*60, + environ=environ, cpu_cost=cmd.get('cpu_cost', 1)) for cmd in yaml.load(f)] @@ -894,9 +896,9 @@ class NodeExpressLanguage(object): def pre_build_steps(self): if self.platform == 'windows': - return [['tools\\run_tests\\pre_build_node.bat']] + return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']] else: - return [['tools/run_tests/pre_build_node.sh', self.node_version]] + return [['tools/run_tests/helper_scripts/pre_build_node.sh', self.node_version]] def make_targets(self): return [] @@ -920,7 +922,7 @@ class NodeExpressLanguage(object): return 'node_express' # different configurations we can run under -with open('tools/run_tests/configs.json') as f: +with open('tools/run_tests/generated/configs.json') as f: _CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read())) @@ -1117,6 +1119,12 @@ argp.add_argument('-x', '--xml_report', default=None, type=str, help='Generates a JUnit-compatible XML report') argp.add_argument('--report_suite_name', default='tests', type=str, help='Test suite name to use in generated JUnit XML report') +argp.add_argument('--quiet_success', + default=False, + action='store_const', + const=True, + help='Dont print anything when a test passes. Passing tests also will not be reported in XML report. ' + + 'Useful when running many iterations of each test (argument -n).') argp.add_argument('--force_default_poller', default=False, action='store_const', const=True, help='Dont try to iterate over many polling strategies when they exist') args = argp.parse_args() @@ -1316,7 +1324,7 @@ def _start_port_server(port_server_port): running = False if running: current_version = int(subprocess.check_output( - [sys.executable, os.path.abspath('tools/run_tests/port_server.py'), + [sys.executable, os.path.abspath('tools/run_tests/python_utils/port_server.py'), 'dump_version'])) print('my port server is version %d' % current_version) running = (version >= current_version) @@ -1328,7 +1336,7 @@ def _start_port_server(port_server_port): fd, logfile = tempfile.mkstemp() os.close(fd) print('starting port_server, with log file %s' % logfile) - args = [sys.executable, os.path.abspath('tools/run_tests/port_server.py'), + args = [sys.executable, os.path.abspath('tools/run_tests/python_utils/port_server.py'), '-p', '%d' % port_server_port, '-l', logfile] env = dict(os.environ) env['BUILD_ID'] = 'pleaseDontKillMeJenkins' @@ -1434,7 +1442,7 @@ def _build_and_run( return [] # start antagonists - antagonists = [subprocess.Popen(['tools/run_tests/antagonist.py']) + antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py']) for _ in range(0, args.antagonists)] port_server_port = 32766 _start_port_server(port_server_port) @@ -1464,20 +1472,24 @@ def _build_and_run( else itertools.repeat(massaged_one_run, runs_per_test)) all_runs = itertools.chain.from_iterable(runs_sequence) + if args.quiet_success: + jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True) num_test_failures, resultset = jobset.run( all_runs, check_cancelled, newline_on_success=newline_on_success, travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs, stop_on_failure=args.stop_on_failure, - add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port}) + add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port}, + quiet_success=args.quiet_success) if resultset: for k, v in sorted(resultset.items()): num_runs, num_failures = _calculate_num_runs_failures(v) - if num_failures == 
num_runs: # what about infinite_runs??? - jobset.message('FAILED', k, do_newline=True) - elif num_failures > 0: - jobset.message( - 'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs), - do_newline=True) + if num_failures > 0: + if num_failures == num_runs: # what about infinite_runs??? + jobset.message('FAILED', k, do_newline=True) + else: + jobset.message( + 'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs), + do_newline=True) finally: for antagonist in antagonists: antagonist.kill() diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py index d03e34766e..8f70a6d2ea 100755 --- a/tools/run_tests/run_tests_matrix.py +++ b/tools/run_tests/run_tests_matrix.py @@ -31,12 +31,13 @@ """Run test matrix.""" import argparse -import jobset import multiprocessing import os -import report_utils import sys -from filter_pull_request_tests import filter_tests + +import python_utils.jobset as jobset +import python_utils.report_utils as report_utils +from python_utils.filter_pull_request_tests import filter_tests _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..')) os.chdir(_ROOT) @@ -69,7 +70,7 @@ def _workspace_jobspec(name, runtests_args=[], workspace_name=None, inner_jobs=_ workspace_name = 'workspace_%s' % name env = {'WORKSPACE_NAME': workspace_name} test_job = jobset.JobSpec( - cmdline=['tools/run_tests/run_tests_in_workspace.sh', + cmdline=['tools/run_tests/helper_scripts/run_tests_in_workspace.sh', '-t', '-j', str(inner_jobs), '-x', '../report_%s.xml' % name, @@ -251,6 +252,17 @@ def _allowed_labels(): return sorted(all_labels) +def _runs_per_test_type(arg_str): + """Auxiliary function to parse the "runs_per_test" flag.""" + try: + n = int(arg_str) + if n <= 0: raise ValueError + return n + except: + msg = '\'{}\' is not a positive integer'.format(arg_str) + raise argparse.ArgumentTypeError(msg) + + if __name__ == "__main__": argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.') argp.add_argument('-j', '--jobs', @@ -262,6 +274,11 @@ if __name__ == "__main__": nargs='+', default=[], help='Filter targets to run by label with AND semantics.') + argp.add_argument('--exclude', + choices=_allowed_labels(), + nargs='+', + default=[], + help='Exclude targets with any of given labels.') argp.add_argument('--build_only', default=False, action='store_const', @@ -278,7 +295,7 @@ if __name__ == "__main__": default=False, action='store_const', const=True, - help='Filters out tests irrelavant to pull request changes.') + help='Filters out tests irrelevant to pull request changes.') argp.add_argument('--base_branch', default='origin/master', type=str, @@ -287,6 +304,9 @@ if __name__ == "__main__": default=_DEFAULT_INNER_JOBS, type=int, help='Number of jobs in each run_tests.py instance') + argp.add_argument('-n', '--runs_per_test', default=1, type=_runs_per_test_type, + help='How many times to run each tests. 
>1 runs implies ' + + 'omitting passing test from the output & reports.') args = argp.parse_args() extra_args = [] @@ -294,6 +314,10 @@ if __name__ == "__main__": extra_args.append('--build_only') if args.force_default_poller: extra_args.append('--force_default_poller') + if args.runs_per_test > 1: + extra_args.append('-n') + extra_args.append('%s' % args.runs_per_test) + extra_args.append('--quiet_success') all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \ _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) @@ -301,7 +325,8 @@ if __name__ == "__main__": jobs = [] for job in all_jobs: if not args.filter or all(filter in job.labels for filter in args.filter): - jobs.append(job) + if not any(exclude_label in job.labels for exclude_label in args.exclude): + jobs.append(job) if not jobs: jobset.message('FAILED', 'No test suites match given criteria.', diff --git a/tools/run_tests/sanity/check_sources_and_headers.py b/tools/run_tests/sanity/check_sources_and_headers.py index b733ba173f..a86db02b80 100755 --- a/tools/run_tests/sanity/check_sources_and_headers.py +++ b/tools/run_tests/sanity/check_sources_and_headers.py @@ -34,7 +34,7 @@ import re import sys root = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..')) -with open(os.path.join(root, 'tools', 'run_tests', 'sources_and_headers.json')) as f: +with open(os.path.join(root, 'tools', 'run_tests', 'generated', 'sources_and_headers.json')) as f: js = json.loads(f.read()) re_inc1 = re.compile(r'^#\s*include\s*"([^"]*)"') diff --git a/tools/run_tests/sanity/check_submodules.sh b/tools/run_tests/sanity/check_submodules.sh index 6ec0786c96..be12f968d2 100755 --- a/tools/run_tests/sanity/check_submodules.sh +++ b/tools/run_tests/sanity/check_submodules.sh @@ -43,7 +43,7 @@ git submodule | awk '{ print $1 }' | sort > $submodules cat << EOF | awk '{ print $1 }' | sort > $want_submodules c880e42ba1c8032d4cdde2aba0541d8a9d9fa2e9 third_party/boringssl (version_for_cocoapods_2.0-100-gc880e42) 05b155ff59114735ec8cd089f669c4c3d8f59029 third_party/gflags (v2.1.0-45-g05b155f) - 44c25c892a6229b20db7cd9dc05584ea865896de third_party/google_benchmark (v0.1.0-343-g44c25c8) + 44c25c892a6229b20db7cd9dc05584ea865896de third_party/benchmark (v0.1.0-343-g44c25c8) c99458533a9b4c743ed51537e25989ea55944908 third_party/googletest (release-1.7.0) a428e42072765993ff674fda72863c9f1aa2d268 third_party/protobuf (v3.1.0) 50893291621658f355bc5b4d450a8d06a563053d third_party/zlib (v1.2.8) diff --git a/tools/run_tests/sanity/check_test_filtering.py b/tools/run_tests/sanity/check_test_filtering.py index b522cdeb49..290a6e2ddf 100755 --- a/tools/run_tests/sanity/check_test_filtering.py +++ b/tools/run_tests/sanity/check_test_filtering.py @@ -38,7 +38,7 @@ import re # hack import paths to pick up extra code sys.path.insert(0, os.path.abspath('tools/run_tests/')) from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs -import filter_pull_request_tests +import python_utils.filter_pull_request_tests as filter_pull_request_tests _LIST_OF_LANGUAGE_LABELS = ['c', 'c++', 'csharp', 'node', 'objc', 'php', 'php7', 'python', 'ruby'] _LIST_OF_PLATFORM_LABELS = ['linux', 'macos', 'windows'] diff --git a/tools/run_tests/task_runner.py b/tools/run_tests/task_runner.py index 2e3fa443b9..fdc4668222 100755 --- a/tools/run_tests/task_runner.py +++ b/tools/run_tests/task_runner.py @@ -33,14 +33,13 @@ from __future__ import print_function import argparse -import atexit -import jobset import 
multiprocessing import sys -import artifact_targets -import distribtest_targets -import package_targets +import artifacts.artifact_targets as artifact_targets +import artifacts.distribtest_targets as distribtest_targets +import artifacts.package_targets as package_targets +import python_utils.jobset as jobset _TARGETS = [] _TARGETS += artifact_targets.targets() diff --git a/vsprojects/README.md b/vsprojects/README.md index 56d9f56009..7af69c2726 100644 --- a/vsprojects/README.md +++ b/vsprojects/README.md @@ -83,10 +83,12 @@ Windows .exe binaries of gRPC protoc plugins. 1. Follow instructions in `third_party\protobuf\cmake\README.md` to create Visual Studio 2013 projects for protobuf. ``` $ cd third_party/protobuf/cmake -$ cmake -G "Visual Studio 12 2013" +$ mkdir build & cd build +$ mkdir solution & cd solution +$ cmake -G "Visual Studio 12 2013" -Dprotobuf_BUILD_TESTS=OFF ../.. ``` -2. Open solution `third_party\protobuf\cmake\protobuf.sln` and build it in Release mode. That will build libraries `libprotobuf.lib` and `libprotoc.lib` needed for the next step. +2. Open solution `third_party\protobuf\cmake\build\solution\protobuf.sln` and build it in Release mode. That will build libraries `libprotobuf.lib` and `libprotoc.lib` needed for the next step. 3. Open solution `vsprojects\grpc_protoc_plugins.sln` and build it in Release mode. As a result, you should obtain a set of gRPC protoc plugin binaries (`grpc_cpp_plugin.exe`, `grpc_csharp_plugin.exe`, ...) diff --git a/vsprojects/build_plugins.bat b/vsprojects/build_plugins.bat index 7c8e056dc4..ae5c5f09be 100644 --- a/vsprojects/build_plugins.bat +++ b/vsprojects/build_plugins.bat @@ -38,7 +38,7 @@ cd /d %~dp0 @call "%VS120COMNTOOLS%\..\..\vc\vcvarsall.bat" x86 @rem Build third_party/protobuf -msbuild ..\third_party\protobuf\cmake\protobuf.sln /p:Configuration=Release || goto :error +msbuild ..\third_party\protobuf\cmake\build\solution\protobuf.sln /p:Configuration=Release || goto :error @rem Build the C# protoc plugins msbuild grpc_protoc_plugins.sln /p:Configuration=Release || goto :error diff --git a/vsprojects/protobuf.props b/vsprojects/protobuf.props index b1de8af27a..b828313572 100644 --- a/vsprojects/protobuf.props +++ b/vsprojects/protobuf.props @@ -1 +1 @@ -<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <ImportGroup Label="PropertySheets" /> <PropertyGroup Label="UserMacros" /> <PropertyGroup /> <ItemDefinitionGroup> <Link> <AdditionalDependencies>libprotobuf.lib;%(AdditionalDependencies)</AdditionalDependencies> <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories> </Link> </ItemDefinitionGroup> <ItemGroup /> </Project>
\ No newline at end of file +<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <ImportGroup Label="PropertySheets" /> <PropertyGroup Label="UserMacros" /> <PropertyGroup /> <ItemDefinitionGroup> <Link> <AdditionalDependencies>libprotobuf.lib;%(AdditionalDependencies)</AdditionalDependencies> <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\build\solution\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories> </Link> </ItemDefinitionGroup> <ItemGroup /> </Project>
\ No newline at end of file diff --git a/vsprojects/protoc.props b/vsprojects/protoc.props index 1bdc07193b..87fff8f128 100644 --- a/vsprojects/protoc.props +++ b/vsprojects/protoc.props @@ -1 +1 @@ -<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <ImportGroup Label="PropertySheets" /> <PropertyGroup Label="UserMacros" /> <PropertyGroup /> <ItemDefinitionGroup> <ClCompile> <DisableSpecificWarnings>4244;4267;%(DisableSpecificWarnings)</DisableSpecificWarnings> </ClCompile> <Link> <AdditionalDependencies>libprotoc.lib;%(AdditionalDependencies)</AdditionalDependencies> <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories> </Link> </ItemDefinitionGroup> <ItemGroup /> </Project>
\ No newline at end of file +<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <ImportGroup Label="PropertySheets" /> <PropertyGroup Label="UserMacros" /> <PropertyGroup /> <ItemDefinitionGroup> <ClCompile> <DisableSpecificWarnings>4244;4267;%(DisableSpecificWarnings)</DisableSpecificWarnings> </ClCompile> <Link> <AdditionalDependencies>libprotoc.lib;%(AdditionalDependencies)</AdditionalDependencies> <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\build\solution\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories> </Link> </ItemDefinitionGroup> <ItemGroup /> </Project>
\ No newline at end of file diff --git a/vsprojects/vcxproj/google_benchmark/google_benchmark.vcxproj b/vsprojects/vcxproj/benchmark/benchmark.vcxproj index 52774e0802..9f262b3b00 100644 --- a/vsprojects/vcxproj/google_benchmark/google_benchmark.vcxproj +++ b/vsprojects/vcxproj/benchmark/benchmark.vcxproj @@ -19,7 +19,7 @@ </ProjectConfiguration> </ItemGroup> <PropertyGroup Label="Globals"> - <ProjectGuid>{AAD4AEF3-DF1E-7A6D-EC35-233BD1031BF4}</ProjectGuid> + <ProjectGuid>{07978586-E47C-8709-A63E-895FBF3C3C7D}</ProjectGuid> <IgnoreWarnIntDirInTempDetected>true</IgnoreWarnIntDirInTempDetected> <IntDir>$(SolutionDir)IntDir\$(MSBuildProjectName)\</IntDir> </PropertyGroup> @@ -57,10 +57,10 @@ </ImportGroup> <PropertyGroup Label="UserMacros" /> <PropertyGroup Condition="'$(Configuration)'=='Debug'"> - <TargetName>google_benchmark</TargetName> + <TargetName>benchmark</TargetName> </PropertyGroup> <PropertyGroup Condition="'$(Configuration)'=='Release'"> - <TargetName>google_benchmark</TargetName> + <TargetName>benchmark</TargetName> </PropertyGroup> <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> <ClCompile> @@ -147,53 +147,53 @@ </ItemDefinitionGroup> <ItemGroup> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\benchmark.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\benchmark_api.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\macros.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\reporter.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\arraysize.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\benchmark_api_internal.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\check.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\colorprint.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\commandlineflags.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\complexity.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\cycleclock.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\internal_macros.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\log.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\mutex.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\re.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\sleep.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\stat.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\string_util.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\sysinfo.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\timers.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\benchmark.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\benchmark_api.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\macros.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\reporter.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\arraysize.h" /> + <ClInclude 
Include="$(SolutionDir)\..\third_party\benchmark\src\benchmark_api_internal.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\check.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\colorprint.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\commandlineflags.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\complexity.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\cycleclock.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\internal_macros.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\log.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\mutex.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\re.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\sleep.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\stat.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\string_util.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\sysinfo.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\timers.h" /> </ItemGroup> <ItemGroup> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\benchmark.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\benchmark.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\benchmark_register.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\benchmark_register.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\colorprint.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\colorprint.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\commandlineflags.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\commandlineflags.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\complexity.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\complexity.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\console_reporter.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\console_reporter.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\csv_reporter.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\csv_reporter.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\json_reporter.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\json_reporter.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\reporter.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\reporter.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\sleep.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\sleep.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\string_util.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\string_util.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\sysinfo.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\sysinfo.cc"> </ClCompile> - <ClCompile 
Include="$(SolutionDir)\..\third_party\google_benchmark\src\timers.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\timers.cc"> </ClCompile> </ItemGroup> <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> diff --git a/vsprojects/vcxproj/benchmark/benchmark.vcxproj.filters b/vsprojects/vcxproj/benchmark/benchmark.vcxproj.filters new file mode 100644 index 0000000000..ccc9ca2cae --- /dev/null +++ b/vsprojects/vcxproj/benchmark/benchmark.vcxproj.filters @@ -0,0 +1,125 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\benchmark.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\benchmark_register.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\colorprint.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\commandlineflags.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\complexity.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\console_reporter.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\csv_reporter.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\json_reporter.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\reporter.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\sleep.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\string_util.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\sysinfo.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\timers.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + </ItemGroup> + <ItemGroup> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\benchmark.h"> + <Filter>third_party\benchmark\include\benchmark</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\benchmark_api.h"> + <Filter>third_party\benchmark\include\benchmark</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\macros.h"> + <Filter>third_party\benchmark\include\benchmark</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\reporter.h"> + <Filter>third_party\benchmark\include\benchmark</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\arraysize.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\benchmark_api_internal.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude 
Include="$(SolutionDir)\..\third_party\benchmark\src\check.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\colorprint.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\commandlineflags.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\complexity.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\cycleclock.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\internal_macros.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\log.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\mutex.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\re.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\sleep.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\stat.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\string_util.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\sysinfo.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\timers.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + </ItemGroup> + + <ItemGroup> + <Filter Include="third_party"> + <UniqueIdentifier>{7b593518-9fee-107e-6b64-24bdce73f939}</UniqueIdentifier> + </Filter> + <Filter Include="third_party\benchmark"> + <UniqueIdentifier>{f0d35de1-6b41-778d-0ba0-faad514fb0f4}</UniqueIdentifier> + </Filter> + <Filter Include="third_party\benchmark\include"> + <UniqueIdentifier>{cbc02dfa-face-8cc6-0efb-efacc0c3369c}</UniqueIdentifier> + </Filter> + <Filter Include="third_party\benchmark\include\benchmark"> + <UniqueIdentifier>{4f2f03fc-b82d-df33-63ee-bedebeb2c0ee}</UniqueIdentifier> + </Filter> + <Filter Include="third_party\benchmark\src"> + <UniqueIdentifier>{f42a8e0a-5a76-0e6f-d708-f0306858f673}</UniqueIdentifier> + </Filter> + </ItemGroup> +</Project> + diff --git a/vsprojects/vcxproj/google_benchmark/google_benchmark.vcxproj.filters b/vsprojects/vcxproj/google_benchmark/google_benchmark.vcxproj.filters deleted file mode 100644 index 9db6ed4657..0000000000 --- a/vsprojects/vcxproj/google_benchmark/google_benchmark.vcxproj.filters +++ /dev/null @@ -1,125 +0,0 @@ -<?xml version="1.0" encoding="utf-8"?> -<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> - <ItemGroup> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\benchmark.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\benchmark_register.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\colorprint.cc"> - 
<Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\commandlineflags.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\complexity.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\console_reporter.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\csv_reporter.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\json_reporter.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\reporter.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\sleep.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\string_util.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\sysinfo.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\timers.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - </ItemGroup> - <ItemGroup> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\benchmark.h"> - <Filter>third_party\google_benchmark\include\benchmark</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\benchmark_api.h"> - <Filter>third_party\google_benchmark\include\benchmark</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\macros.h"> - <Filter>third_party\google_benchmark\include\benchmark</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\reporter.h"> - <Filter>third_party\google_benchmark\include\benchmark</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\arraysize.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\benchmark_api_internal.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\check.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\colorprint.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\commandlineflags.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\complexity.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\cycleclock.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude 
Include="$(SolutionDir)\..\third_party\google_benchmark\src\internal_macros.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\log.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\mutex.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\re.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\sleep.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\stat.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\string_util.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\sysinfo.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\timers.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - </ItemGroup> - - <ItemGroup> - <Filter Include="third_party"> - <UniqueIdentifier>{7458b63d-7ba4-103d-2bed-3e3ad30d8237}</UniqueIdentifier> - </Filter> - <Filter Include="third_party\google_benchmark"> - <UniqueIdentifier>{54a154e8-669b-a7c1-9b6e-bd1aab2f86e3}</UniqueIdentifier> - </Filter> - <Filter Include="third_party\google_benchmark\include"> - <UniqueIdentifier>{f54c3cb1-ec20-a651-6956-78379b51e1a5}</UniqueIdentifier> - </Filter> - <Filter Include="third_party\google_benchmark\include\benchmark"> - <UniqueIdentifier>{0483a457-8050-4565-bc15-09695bf7b822}</UniqueIdentifier> - </Filter> - <Filter Include="third_party\google_benchmark\src"> - <UniqueIdentifier>{c39ff2d1-691e-4614-4d75-4bc20db05e09}</UniqueIdentifier> - </Filter> - </ItemGroup> -</Project> - diff --git a/vsprojects/vcxproj/test/bm_fullstack/bm_fullstack.vcxproj b/vsprojects/vcxproj/test/bm_fullstack/bm_fullstack.vcxproj index 1ce993e323..3809beb508 100644 --- a/vsprojects/vcxproj/test/bm_fullstack/bm_fullstack.vcxproj +++ b/vsprojects/vcxproj/test/bm_fullstack/bm_fullstack.vcxproj @@ -164,8 +164,8 @@ </ClCompile> </ItemGroup> <ItemGroup> - <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\google_benchmark\google_benchmark.vcxproj"> - <Project>{AAD4AEF3-DF1E-7A6D-EC35-233BD1031BF4}</Project> + <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\benchmark\benchmark.vcxproj"> + <Project>{07978586-E47C-8709-A63E-895FBF3C3C7D}</Project> </ProjectReference> <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc++_test_util\grpc++_test_util.vcxproj"> <Project>{0BE77741-552A-929B-A497-4EF7ECE17A64}</Project> diff --git a/vsprojects/vcxproj/test/noop-benchmark/noop-benchmark.vcxproj b/vsprojects/vcxproj/test/noop-benchmark/noop-benchmark.vcxproj index 99f33b2165..15a82c0ed6 100644 --- a/vsprojects/vcxproj/test/noop-benchmark/noop-benchmark.vcxproj +++ b/vsprojects/vcxproj/test/noop-benchmark/noop-benchmark.vcxproj @@ -164,8 +164,8 @@ </ClCompile> </ItemGroup> <ItemGroup> - <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\google_benchmark\google_benchmark.vcxproj"> - <Project>{AAD4AEF3-DF1E-7A6D-EC35-233BD1031BF4}</Project> + <ProjectReference 
Include="$(SolutionDir)\..\vsprojects\vcxproj\.\benchmark\benchmark.vcxproj"> + <Project>{07978586-E47C-8709-A63E-895FBF3C3C7D}</Project> </ProjectReference> </ItemGroup> <ItemGroup> |