-rw-r--r--  .travis.yml | 12
-rw-r--r--  BUILD | 456
-rw-r--r--  CMakeLists.txt | 236
-rw-r--r--  Makefile | 380
-rw-r--r--  build.yaml | 20
-rw-r--r--  composer.json | 8
-rw-r--r--  doc/c-style-guide.md | 53
-rw-r--r--  doc/statuscodes.md | 1
-rw-r--r--  examples/csharp/helloworld/README.md | 16
-rw-r--r--  examples/objective-c/auth_sample/AuthTestService.podspec | 34
-rw-r--r--  examples/objective-c/auth_sample/Podfile | 34
-rw-r--r--  examples/objective-c/helloworld/HelloWorld.podspec | 34
-rw-r--r--  examples/objective-c/helloworld/Podfile | 34
-rw-r--r--  examples/objective-c/route_guide/Podfile | 34
-rw-r--r--  examples/objective-c/route_guide/RouteGuide.podspec | 34
-rw-r--r--  examples/php/README.md | 2
-rw-r--r--  examples/php/composer.json | 6
-rw-r--r--  include/grpc++/server.h | 3
-rw-r--r--  include/grpc++/support/slice.h | 3
-rw-r--r--  package.json | 2
-rw-r--r--  package.xml | 22
-rw-r--r--  setup.py | 83
-rw-r--r--  src/compiler/config.h | 7
-rw-r--r--  src/compiler/cpp_generator.cc | 96
-rw-r--r--  src/compiler/cpp_plugin.cc | 30
-rw-r--r--  src/compiler/csharp_generator.cc | 271
-rw-r--r--  src/compiler/csharp_generator_helpers.h | 6
-rw-r--r--  src/compiler/csharp_plugin.cc | 6
-rw-r--r--  src/compiler/generator_helpers.h | 14
-rw-r--r--  src/compiler/node_generator.cc | 62
-rw-r--r--  src/compiler/node_generator_helpers.h | 2
-rw-r--r--  src/compiler/objective_c_generator.cc | 29
-rw-r--r--  src/compiler/objective_c_generator_helpers.h | 1
-rw-r--r--  src/compiler/objective_c_plugin.cc | 48
-rw-r--r--  src/compiler/python_generator.cc | 333
-rw-r--r--  src/compiler/python_generator.h | 1
-rw-r--r--  src/compiler/ruby_generator.cc | 33
-rw-r--r--  src/compiler/ruby_generator_map-inl.h | 5
-rw-r--r--  src/core/ext/transport/chttp2/client/secure/secure_channel_create.c | 6
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_plugin.c | 3
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c | 387
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h | 67
-rw-r--r--  src/core/ext/transport/chttp2/transport/parsing.c | 18
-rw-r--r--  src/core/ext/transport/chttp2/transport/status_conversion.c | 10
-rw-r--r--  src/core/ext/transport/chttp2/transport/status_conversion.h | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/stream_lists.c | 20
-rw-r--r--  src/core/ext/transport/chttp2/transport/writing.c | 16
-rw-r--r--  src/core/lib/iomgr/endpoint.c | 4
-rw-r--r--  src/core/lib/iomgr/endpoint.h | 4
-rw-r--r--  src/core/lib/iomgr/ev_epoll_linux.c | 277
-rw-r--r--  src/core/lib/iomgr/ev_poll_and_epoll_posix.c | 3
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.c | 3
-rw-r--r--  src/core/lib/iomgr/ev_posix.c | 4
-rw-r--r--  src/core/lib/iomgr/ev_posix.h | 4
-rw-r--r--  src/core/lib/iomgr/exec_ctx.c | 10
-rw-r--r--  src/core/lib/iomgr/exec_ctx.h | 6
-rw-r--r--  src/core/lib/iomgr/iomgr.c | 3
-rw-r--r--  src/core/lib/iomgr/network_status_tracker.c | 24
-rw-r--r--  src/core/lib/iomgr/network_status_tracker.h | 4
-rw-r--r--  src/core/lib/iomgr/tcp_posix.c | 18
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.c | 3
-rw-r--r--  src/core/lib/iomgr/tcp_windows.c | 13
-rw-r--r--  src/core/lib/iomgr/workqueue.h | 39
-rw-r--r--  src/core/lib/iomgr/workqueue_posix.c | 8
-rw-r--r--  src/core/lib/iomgr/workqueue_posix.h | 5
-rw-r--r--  src/core/lib/iomgr/workqueue_windows.c | 22
-rw-r--r--  src/core/lib/security/transport/secure_endpoint.c | 18
-rw-r--r--  src/core/lib/support/time.c | 122
-rw-r--r--  src/core/lib/surface/call.c | 3
-rw-r--r--  src/core/lib/surface/server.c | 76
-rw-r--r--  src/core/lib/surface/version.c | 2
-rw-r--r--  src/core/lib/transport/connectivity_state.c | 3
-rw-r--r--  src/cpp/server/server.cc | 8
-rw-r--r--  src/csharp/Grpc.Auth/project.json | 4
-rw-r--r--  src/csharp/Grpc.Core/VersionInfo.cs | 4
-rw-r--r--  src/csharp/Grpc.Core/project.json | 2
-rw-r--r--  src/csharp/Grpc.HealthCheck/project.json | 4
-rw-r--r--  src/csharp/README.md | 4
-rw-r--r--  src/csharp/build_packages.bat | 2
-rw-r--r--  src/csharp/ext/grpc_csharp_ext.c | 8
-rw-r--r--  src/node/health_check/package.json | 2
-rw-r--r--  src/node/tools/package.json | 2
-rw-r--r--  src/objective-c/!ProtoCompiler-gRPCPlugin.podspec | 122
-rw-r--r--  src/objective-c/!ProtoCompiler.podspec | 136
-rw-r--r--  src/objective-c/BoringSSL.podspec | 7
-rw-r--r--  src/objective-c/GRPCClient/GRPCCall.m | 8
-rw-r--r--  src/objective-c/README.md | 109
-rw-r--r--  src/objective-c/examples/RemoteTestClient/RemoteTest.podspec | 21
-rw-r--r--  src/objective-c/examples/Sample/Podfile | 3
-rw-r--r--  src/objective-c/examples/SwiftSample/Podfile | 3
-rw-r--r--  src/objective-c/tests/Podfile | 14
-rw-r--r--  src/objective-c/tests/RemoteTestClient/RemoteTest.podspec | 26
-rw-r--r--  src/php/README.md | 8
-rw-r--r--  src/php/composer.json | 11
-rw-r--r--  src/proto/grpc/testing/control.proto | 3
-rw-r--r--  src/python/grpcio/_unixccompiler_patch.py | 98
-rw-r--r--  src/python/grpcio/commands.py | 24
-rw-r--r--  src/python/grpcio/grpc/__init__.py | 39
-rw-r--r--  src/python/grpcio/grpc/_adapter/.gitignore | 5
-rw-r--r--  src/python/grpcio/grpc/_adapter/__init__.py | 30
-rw-r--r--  src/python/grpcio/grpc/_adapter/_common.py | 76
-rw-r--r--  src/python/grpcio/grpc/_adapter/_intermediary_low.py | 258
-rw-r--r--  src/python/grpcio/grpc/_adapter/_low.py | 229
-rw-r--r--  src/python/grpcio/grpc/_adapter/_types.py | 446
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi | 3
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi | 3
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi | 16
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi | 9
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi | 9
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi | 3
-rw-r--r--  src/python/grpcio/grpc/_links/__init__.py | 30
-rw-r--r--  src/python/grpcio/grpc/_links/_constants.py | 42
-rw-r--r--  src/python/grpcio/grpc/_links/invocation.py | 453
-rw-r--r--  src/python/grpcio/grpc/_links/service.py | 509
-rw-r--r--  src/python/grpcio/grpc/beta/_server.py | 209
-rw-r--r--  src/python/grpcio/grpc/beta/_stub.py | 155
-rw-r--r--  src/python/grpcio/grpc/beta/implementations.py | 1
-rw-r--r--  src/python/grpcio/grpc/framework/core/__init__.py | 30
-rw-r--r--  src/python/grpcio/grpc/framework/core/_constants.py | 60
-rw-r--r--  src/python/grpcio/grpc/framework/core/_context.py | 94
-rw-r--r--  src/python/grpcio/grpc/framework/core/_emission.py | 100
-rw-r--r--  src/python/grpcio/grpc/framework/core/_end.py | 244
-rw-r--r--  src/python/grpcio/grpc/framework/core/_expiration.py | 154
-rw-r--r--  src/python/grpcio/grpc/framework/core/_ingestion.py | 439
-rw-r--r--  src/python/grpcio/grpc/framework/core/_interfaces.py | 331
-rw-r--r--  src/python/grpcio/grpc/framework/core/_operation.py | 204
-rw-r--r--  src/python/grpcio/grpc/framework/core/_protocol.py | 176
-rw-r--r--  src/python/grpcio/grpc/framework/core/_reception.py | 159
-rw-r--r--  src/python/grpcio/grpc/framework/core/_termination.py | 229
-rw-r--r--  src/python/grpcio/grpc/framework/core/_transmission.py | 335
-rw-r--r--  src/python/grpcio/grpc/framework/core/_utilities.py | 54
-rw-r--r--  src/python/grpcio/grpc/framework/core/implementations.py | 62
-rw-r--r--  src/python/grpcio/grpc/framework/crust/__init__.py | 30
-rw-r--r--  src/python/grpcio/grpc/framework/crust/_calls.py | 223
-rw-r--r--  src/python/grpcio/grpc/framework/crust/_control.py | 584
-rw-r--r--  src/python/grpcio/grpc/framework/crust/_service.py | 173
-rw-r--r--  src/python/grpcio/grpc/framework/crust/implementations.py | 366
-rw-r--r--  src/python/grpcio/grpc/framework/foundation/_timer_future.py | 228
-rw-r--r--  src/python/grpcio/grpc/framework/foundation/activated.py | 65
-rw-r--r--  src/python/grpcio/grpc/framework/foundation/later.py | 51
-rw-r--r--  src/python/grpcio/grpc/framework/foundation/relay.py | 174
-rw-r--r--  src/python/grpcio/grpc/framework/interfaces/links/__init__.py | 30
-rw-r--r--  src/python/grpcio/grpc/framework/interfaces/links/links.py | 143
-rw-r--r--  src/python/grpcio/grpc/framework/interfaces/links/utilities.py | 44
-rw-r--r--  src/python/grpcio/grpc_version.py | 2
-rw-r--r--  src/python/grpcio_tests/grpc_version.py | 2
-rw-r--r--  src/python/grpcio_tests/tests/interop/methods.py | 46
-rw-r--r--  src/python/grpcio_tests/tests/qps/qps_worker.py | 2
-rw-r--r--  src/python/grpcio_tests/tests/qps/worker_server.py | 2
-rw-r--r--  src/python/grpcio_tests/tests/unit/_adapter/.gitignore | 5
-rw-r--r--  src/python/grpcio_tests/tests/unit/_adapter/__init__.py | 30
-rw-r--r--  src/python/grpcio_tests/tests/unit/_adapter/_proto_scenarios.py | 262
-rw-r--r--  src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py | 2
-rw-r--r--  src/python/grpcio_tests/tests/unit/_exit_scenarios.py | 8
-rw-r--r--  src/python/grpcio_tests/tests/unit/_exit_test.py | 1
-rw-r--r--  src/python/grpcio_tests/tests/unit/_junkdrawer/math_pb2.py | 266
-rw-r--r--  src/python/grpcio_tests/tests/unit/_links/__init__.py | 30
-rw-r--r--  src/python/grpcio_tests/tests/unit/_links/_proto_scenarios.py | 262
-rw-r--r--  src/python/grpcio_tests/tests/unit/_rpc_test.py | 25
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/core/__init__.py | 30
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/base/__init__.py | 30
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/base/_control.py | 570
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/base/_sequence.py | 171
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/base/_state.py | 55
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_cases.py | 279
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_interfaces.py | 186
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/face/_receiver.py | 95
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/links/__init__.py | 30
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_cases.py | 327
-rw-r--r--  src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_utilities.py | 167
-rwxr-xr-x  src/ruby/bin/math_services.rb | 6
-rw-r--r--  src/ruby/ext/grpc/rb_call.c | 8
-rw-r--r--  src/ruby/ext/grpc/rb_compression_options.c | 464
-rw-r--r--  src/ruby/ext/grpc/rb_compression_options.h | 44
-rw-r--r--  src/ruby/ext/grpc/rb_grpc.c | 4
-rw-r--r--  src/ruby/ext/grpc/rb_server.c | 2
-rw-r--r--  src/ruby/lib/grpc/version.rb | 2
-rw-r--r--  src/ruby/pb/src/proto/grpc/testing/messages.rb | 18
-rwxr-xr-x  src/ruby/pb/test/client.rb | 152
-rw-r--r--  src/ruby/pb/test/proto/empty.rb | 15
-rw-r--r--  src/ruby/pb/test/proto/messages.rb | 80
-rw-r--r--  src/ruby/pb/test/proto/test.rb | 14
-rw-r--r--  src/ruby/pb/test/proto/test_services.rb | 64
-rwxr-xr-x  src/ruby/pb/test/server.rb | 6
-rw-r--r--  src/ruby/qps/src/proto/grpc/testing/messages.rb | 18
-rw-r--r--  src/ruby/spec/compression_options_spec.rb | 164
-rw-r--r--  src/ruby/tools/version.rb | 2
-rw-r--r--  templates/composer.json.template | 8
-rw-r--r--  templates/package.xml.template | 18
-rw-r--r--  templates/tools/dockerfile/apt_get_pyenv.include | 18
-rw-r--r--  templates/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile.template | 3
-rw-r--r--  templates/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/python_deps.include | 2
-rw-r--r--  templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template | 2
-rw-r--r--  templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template | 2
-rw-r--r--  templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/test/fuzzer/Dockerfile.template | 1
-rw-r--r--  templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template | 2
-rw-r--r--  templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template | 2
-rw-r--r--  templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template | 39
-rw-r--r--  templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template | 2
-rw-r--r--  templates/tools/dockerfile/test/sanity/Dockerfile.template | 1
-rw-r--r--  test/core/end2end/dualstack_socket_test.c | 2
-rw-r--r--  test/core/end2end/tests/high_initial_seqno.c | 6
-rw-r--r--  test/core/end2end/tests/network_status_change.c | 5
-rw-r--r--  test/core/internal_api_canaries/iomgr.c | 13
-rw-r--r--  test/core/iomgr/workqueue_test.c | 150
-rw-r--r--  test/core/surface/sequential_connectivity_test.c | 2
-rw-r--r--  test/core/transport/chttp2/status_conversion_test.c | 68
-rw-r--r--  test/core/util/mock_endpoint.c | 12
-rw-r--r--  test/core/util/passthru_endpoint.c | 12
-rw-r--r--  test/cpp/end2end/async_end2end_test.cc | 25
-rw-r--r--  test/cpp/end2end/end2end_test.cc | 3
-rw-r--r--  test/cpp/qps/client.h | 68
-rw-r--r--  test/cpp/qps/client_async.cc | 103
-rw-r--r--  test/cpp/qps/client_sync.cc | 28
-rw-r--r--  test/cpp/qps/driver.cc | 135
-rw-r--r--  test/cpp/qps/driver.h | 2
-rwxr-xr-x  test/cpp/qps/gen_build_yaml.py | 11
-rw-r--r--  test/cpp/qps/json_run_localhost.cc | 2
-rw-r--r--  test/cpp/qps/qps_json_driver.cc | 18
-rw-r--r--  test/cpp/qps/qps_worker.cc | 40
-rw-r--r--  test/cpp/qps/server_async.cc | 57
-rw-r--r--  test/cpp/util/slice_test.cc | 10
-rwxr-xr-x  tools/distrib/python/docgen.py | 1
-rw-r--r--  tools/distrib/python/grpcio_tools/grpc_version.py | 2
-rw-r--r--  tools/distrib/python/grpcio_tools/setup.py | 65
-rwxr-xr-x  tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh | 2
-rw-r--r--  tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile | 15
-rw-r--r--  tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile | 15
-rw-r--r--  tools/dockerfile/interoptest/grpc_interop_go/Dockerfile | 15
-rw-r--r--  tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile | 15
-rw-r--r--  tools/dockerfile/interoptest/grpc_interop_java/Dockerfile | 15
-rw-r--r--  tools/dockerfile/interoptest/grpc_interop_node/Dockerfile | 15
-rw-r--r--  tools/dockerfile/interoptest/grpc_interop_php/Dockerfile | 15
-rw-r--r--  tools/dockerfile/interoptest/grpc_interop_python/Dockerfile | 2
-rw-r--r--  tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile | 15
-rw-r--r--  tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile | 15
-rw-r--r--  tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile | 15
-rw-r--r--  tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile | 15
-rw-r--r--  tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile | 15
-rw-r--r--  tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile | 15
-rw-r--r--  tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile | 15
-rw-r--r--  tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile | 2
-rw-r--r--  tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile | 15
-rw-r--r--  tools/dockerfile/test/csharp_coreclr_x64/Dockerfile | 15
-rw-r--r--  tools/dockerfile/test/csharp_jessie_x64/Dockerfile | 15
-rw-r--r--  tools/dockerfile/test/cxx_jessie_x64/Dockerfile | 15
-rw-r--r--  tools/dockerfile/test/cxx_jessie_x86/Dockerfile | 15
-rw-r--r--  tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile | 15
-rw-r--r--  tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile | 15
-rw-r--r--  tools/dockerfile/test/cxx_wheezy_x64/Dockerfile | 15
-rw-r--r--  tools/dockerfile/test/fuzzer/Dockerfile | 15
-rw-r--r--  tools/dockerfile/test/multilang_jessie_x64/Dockerfile | 2
-rw-r--r--  tools/dockerfile/test/node_jessie_x64/Dockerfile | 15
-rw-r--r--  tools/dockerfile/test/php_jessie_x64/Dockerfile | 15
-rw-r--r--  tools/dockerfile/test/python_jessie_x64/Dockerfile | 2
-rw-r--r--  tools/dockerfile/test/python_pyenv_x64/Dockerfile | 112
-rw-r--r--  tools/dockerfile/test/ruby_jessie_x64/Dockerfile | 15
-rw-r--r--  tools/dockerfile/test/sanity/Dockerfile | 15
-rw-r--r--  tools/doxygen/Doxyfile.c++ | 12
-rw-r--r--  tools/doxygen/Doxyfile.c++.internal | 229
-rw-r--r--  tools/doxygen/Doxyfile.core | 2
-rw-r--r--  tools/doxygen/Doxyfile.core.internal | 2
-rw-r--r--  tools/run_tests/artifact_targets.py | 2
-rw-r--r--  tools/run_tests/build_artifact_python.bat | 23
-rwxr-xr-x  tools/run_tests/build_python.sh | 13
-rw-r--r--  tools/run_tests/distribtest_targets.py | 2
-rwxr-xr-x  tools/run_tests/dockerjob.py | 4
-rwxr-xr-x  tools/run_tests/jobset.py | 8
-rw-r--r--  tools/run_tests/package_targets.py | 2
-rw-r--r--  tools/run_tests/perf_html_report.template | 21
-rwxr-xr-x  tools/run_tests/performance/bq_upload_result.py | 2
-rw-r--r--  tools/run_tests/performance/scenario_result_schema.json | 10
-rwxr-xr-x  tools/run_tests/port_server.py | 12
-rw-r--r--  tools/run_tests/report_utils.py | 45
-rwxr-xr-x  tools/run_tests/run_interop_tests.py | 30
-rwxr-xr-x  tools/run_tests/run_performance_tests.py | 23
-rwxr-xr-x  tools/run_tests/run_stress_tests.py | 16
-rwxr-xr-x  tools/run_tests/run_tests.py | 133
-rw-r--r--  tools/run_tests/sources_and_headers.json | 24
-rwxr-xr-x  tools/run_tests/task_runner.py | 4
-rw-r--r--  tools/run_tests/tests.json | 131
-rw-r--r--  vsprojects/grpc.sln | 2
-rw-r--r--  vsprojects/vcxproj/grpc++/grpc++.vcxproj | 346
-rw-r--r--  vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters | 765
-rw-r--r--  vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj | 346
-rw-r--r--  vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters | 765
305 files changed, 4436 insertions(+), 15570 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index 7576e076a0..4cdad37c6c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,3 +1,5 @@
+git:
+ depth: 1
language: objective-c
osx_image: xcode7.3
env:
@@ -14,7 +16,7 @@ env:
- SCHEME="InteropTestsLocalCleartext" WORKSPACE="Tests.xcworkspace"
TEST_PATH="src/objective-c/tests" BUILD_ONLY="false"
INTEROP_SERVER="true"
- # TODO(jcanizales): Investigate why they time out:
+ # TODO(jcanizales): Make tests an app project (instead of library), so the following will work.
# - SCHEME="InteropTestsRemote" WORKSPACE="Tests.xcworkspace"
# TEST_PATH="src/objective-c/tests" BUILD_ONLY="false"
# INTEROP_SERVER="true"
@@ -34,15 +36,15 @@ env:
TEST_PATH="src/objective-c/examples/SwiftSample" BUILD_ONLY="true"
INTEROP_SERVER="false"
before_install:
+ # Until Travis upgrades from Cocoapods 0.39, we need to do it here.
- pod --version
- gem uninstall cocoapods -a
- - gem install cocoapods -v '1.0.0'
+ - gem install cocoapods -v '1.0.1'
- pod --version
+ # Recent pods aren't found if we don't explicitly update Cocoapods' repo.
+ - pod repo update
- brew install gflags
install:
- - make grpc_objective_c_plugin
- - install bins/opt/grpc_objective_c_plugin /usr/local/bin/protoc-gen-objcgrpc
- - install bins/opt/protobuf/protoc /usr/local/bin/protoc
- pushd $TEST_PATH
- pod install
- popd
diff --git a/BUILD b/BUILD
index 8c17065927..33323be229 100644
--- a/BUILD
+++ b/BUILD
@@ -1235,109 +1235,6 @@ cc_library(
"src/cpp/client/create_channel_internal.h",
"src/cpp/server/dynamic_thread_pool.h",
"src/cpp/server/thread_pool_interface.h",
- "src/core/lib/channel/channel_args.h",
- "src/core/lib/channel/channel_stack.h",
- "src/core/lib/channel/channel_stack_builder.h",
- "src/core/lib/channel/compress_filter.h",
- "src/core/lib/channel/connected_channel.h",
- "src/core/lib/channel/context.h",
- "src/core/lib/channel/http_client_filter.h",
- "src/core/lib/channel/http_server_filter.h",
- "src/core/lib/compression/algorithm_metadata.h",
- "src/core/lib/compression/message_compress.h",
- "src/core/lib/debug/trace.h",
- "src/core/lib/http/format_request.h",
- "src/core/lib/http/httpcli.h",
- "src/core/lib/http/parser.h",
- "src/core/lib/iomgr/closure.h",
- "src/core/lib/iomgr/endpoint.h",
- "src/core/lib/iomgr/endpoint_pair.h",
- "src/core/lib/iomgr/error.h",
- "src/core/lib/iomgr/ev_epoll_linux.h",
- "src/core/lib/iomgr/ev_poll_and_epoll_posix.h",
- "src/core/lib/iomgr/ev_poll_posix.h",
- "src/core/lib/iomgr/ev_posix.h",
- "src/core/lib/iomgr/exec_ctx.h",
- "src/core/lib/iomgr/executor.h",
- "src/core/lib/iomgr/iocp_windows.h",
- "src/core/lib/iomgr/iomgr.h",
- "src/core/lib/iomgr/iomgr_internal.h",
- "src/core/lib/iomgr/iomgr_posix.h",
- "src/core/lib/iomgr/load_file.h",
- "src/core/lib/iomgr/network_status_tracker.h",
- "src/core/lib/iomgr/polling_entity.h",
- "src/core/lib/iomgr/pollset.h",
- "src/core/lib/iomgr/pollset_set.h",
- "src/core/lib/iomgr/pollset_set_windows.h",
- "src/core/lib/iomgr/pollset_windows.h",
- "src/core/lib/iomgr/resolve_address.h",
- "src/core/lib/iomgr/sockaddr.h",
- "src/core/lib/iomgr/sockaddr_posix.h",
- "src/core/lib/iomgr/sockaddr_utils.h",
- "src/core/lib/iomgr/sockaddr_windows.h",
- "src/core/lib/iomgr/socket_utils_posix.h",
- "src/core/lib/iomgr/socket_windows.h",
- "src/core/lib/iomgr/tcp_client.h",
- "src/core/lib/iomgr/tcp_posix.h",
- "src/core/lib/iomgr/tcp_server.h",
- "src/core/lib/iomgr/tcp_windows.h",
- "src/core/lib/iomgr/time_averaged_stats.h",
- "src/core/lib/iomgr/timer.h",
- "src/core/lib/iomgr/timer_heap.h",
- "src/core/lib/iomgr/udp_server.h",
- "src/core/lib/iomgr/unix_sockets_posix.h",
- "src/core/lib/iomgr/wakeup_fd_pipe.h",
- "src/core/lib/iomgr/wakeup_fd_posix.h",
- "src/core/lib/iomgr/workqueue.h",
- "src/core/lib/iomgr/workqueue_posix.h",
- "src/core/lib/iomgr/workqueue_windows.h",
- "src/core/lib/json/json.h",
- "src/core/lib/json/json_common.h",
- "src/core/lib/json/json_reader.h",
- "src/core/lib/json/json_writer.h",
- "src/core/lib/surface/api_trace.h",
- "src/core/lib/surface/call.h",
- "src/core/lib/surface/call_test_only.h",
- "src/core/lib/surface/channel.h",
- "src/core/lib/surface/channel_init.h",
- "src/core/lib/surface/channel_stack_type.h",
- "src/core/lib/surface/completion_queue.h",
- "src/core/lib/surface/event_string.h",
- "src/core/lib/surface/init.h",
- "src/core/lib/surface/lame_client.h",
- "src/core/lib/surface/server.h",
- "src/core/lib/transport/byte_stream.h",
- "src/core/lib/transport/connectivity_state.h",
- "src/core/lib/transport/metadata.h",
- "src/core/lib/transport/metadata_batch.h",
- "src/core/lib/transport/static_metadata.h",
- "src/core/lib/transport/transport.h",
- "src/core/lib/transport/transport_impl.h",
- "src/core/lib/security/context/security_context.h",
- "src/core/lib/security/credentials/composite/composite_credentials.h",
- "src/core/lib/security/credentials/credentials.h",
- "src/core/lib/security/credentials/fake/fake_credentials.h",
- "src/core/lib/security/credentials/google_default/google_default_credentials.h",
- "src/core/lib/security/credentials/iam/iam_credentials.h",
- "src/core/lib/security/credentials/jwt/json_token.h",
- "src/core/lib/security/credentials/jwt/jwt_credentials.h",
- "src/core/lib/security/credentials/jwt/jwt_verifier.h",
- "src/core/lib/security/credentials/oauth2/oauth2_credentials.h",
- "src/core/lib/security/credentials/plugin/plugin_credentials.h",
- "src/core/lib/security/credentials/ssl/ssl_credentials.h",
- "src/core/lib/security/transport/auth_filters.h",
- "src/core/lib/security/transport/handshake.h",
- "src/core/lib/security/transport/secure_endpoint.h",
- "src/core/lib/security/transport/security_connector.h",
- "src/core/lib/security/transport/tsi_error.h",
- "src/core/lib/security/util/b64.h",
- "src/core/lib/security/util/json_util.h",
- "src/core/ext/transport/chttp2/alpn/alpn.h",
- "src/core/lib/tsi/fake_transport_security.h",
- "src/core/lib/tsi/ssl_transport_security.h",
- "src/core/lib/tsi/ssl_types.h",
- "src/core/lib/tsi/transport_security.h",
- "src/core/lib/tsi/transport_security_interface.h",
"src/cpp/client/secure_credentials.cc",
"src/cpp/common/auth_property_iterator.cc",
"src/cpp/common/secure_auth_context.cc",
@@ -1370,122 +1267,6 @@ cc_library(
"src/cpp/util/status.cc",
"src/cpp/util/string_ref.cc",
"src/cpp/util/time.cc",
- "src/core/lib/channel/channel_args.c",
- "src/core/lib/channel/channel_stack.c",
- "src/core/lib/channel/channel_stack_builder.c",
- "src/core/lib/channel/compress_filter.c",
- "src/core/lib/channel/connected_channel.c",
- "src/core/lib/channel/http_client_filter.c",
- "src/core/lib/channel/http_server_filter.c",
- "src/core/lib/compression/compression.c",
- "src/core/lib/compression/message_compress.c",
- "src/core/lib/debug/trace.c",
- "src/core/lib/http/format_request.c",
- "src/core/lib/http/httpcli.c",
- "src/core/lib/http/parser.c",
- "src/core/lib/iomgr/closure.c",
- "src/core/lib/iomgr/endpoint.c",
- "src/core/lib/iomgr/endpoint_pair_posix.c",
- "src/core/lib/iomgr/endpoint_pair_windows.c",
- "src/core/lib/iomgr/error.c",
- "src/core/lib/iomgr/ev_epoll_linux.c",
- "src/core/lib/iomgr/ev_poll_and_epoll_posix.c",
- "src/core/lib/iomgr/ev_poll_posix.c",
- "src/core/lib/iomgr/ev_posix.c",
- "src/core/lib/iomgr/exec_ctx.c",
- "src/core/lib/iomgr/executor.c",
- "src/core/lib/iomgr/iocp_windows.c",
- "src/core/lib/iomgr/iomgr.c",
- "src/core/lib/iomgr/iomgr_posix.c",
- "src/core/lib/iomgr/iomgr_windows.c",
- "src/core/lib/iomgr/load_file.c",
- "src/core/lib/iomgr/network_status_tracker.c",
- "src/core/lib/iomgr/polling_entity.c",
- "src/core/lib/iomgr/pollset_set_windows.c",
- "src/core/lib/iomgr/pollset_windows.c",
- "src/core/lib/iomgr/resolve_address_posix.c",
- "src/core/lib/iomgr/resolve_address_windows.c",
- "src/core/lib/iomgr/sockaddr_utils.c",
- "src/core/lib/iomgr/socket_utils_common_posix.c",
- "src/core/lib/iomgr/socket_utils_linux.c",
- "src/core/lib/iomgr/socket_utils_posix.c",
- "src/core/lib/iomgr/socket_windows.c",
- "src/core/lib/iomgr/tcp_client_posix.c",
- "src/core/lib/iomgr/tcp_client_windows.c",
- "src/core/lib/iomgr/tcp_posix.c",
- "src/core/lib/iomgr/tcp_server_posix.c",
- "src/core/lib/iomgr/tcp_server_windows.c",
- "src/core/lib/iomgr/tcp_windows.c",
- "src/core/lib/iomgr/time_averaged_stats.c",
- "src/core/lib/iomgr/timer.c",
- "src/core/lib/iomgr/timer_heap.c",
- "src/core/lib/iomgr/udp_server.c",
- "src/core/lib/iomgr/unix_sockets_posix.c",
- "src/core/lib/iomgr/unix_sockets_posix_noop.c",
- "src/core/lib/iomgr/wakeup_fd_eventfd.c",
- "src/core/lib/iomgr/wakeup_fd_nospecial.c",
- "src/core/lib/iomgr/wakeup_fd_pipe.c",
- "src/core/lib/iomgr/wakeup_fd_posix.c",
- "src/core/lib/iomgr/workqueue_posix.c",
- "src/core/lib/iomgr/workqueue_windows.c",
- "src/core/lib/json/json.c",
- "src/core/lib/json/json_reader.c",
- "src/core/lib/json/json_string.c",
- "src/core/lib/json/json_writer.c",
- "src/core/lib/surface/alarm.c",
- "src/core/lib/surface/api_trace.c",
- "src/core/lib/surface/byte_buffer.c",
- "src/core/lib/surface/byte_buffer_reader.c",
- "src/core/lib/surface/call.c",
- "src/core/lib/surface/call_details.c",
- "src/core/lib/surface/call_log_batch.c",
- "src/core/lib/surface/channel.c",
- "src/core/lib/surface/channel_init.c",
- "src/core/lib/surface/channel_ping.c",
- "src/core/lib/surface/channel_stack_type.c",
- "src/core/lib/surface/completion_queue.c",
- "src/core/lib/surface/event_string.c",
- "src/core/lib/surface/lame_client.c",
- "src/core/lib/surface/metadata_array.c",
- "src/core/lib/surface/server.c",
- "src/core/lib/surface/validate_metadata.c",
- "src/core/lib/surface/version.c",
- "src/core/lib/transport/byte_stream.c",
- "src/core/lib/transport/connectivity_state.c",
- "src/core/lib/transport/metadata.c",
- "src/core/lib/transport/metadata_batch.c",
- "src/core/lib/transport/static_metadata.c",
- "src/core/lib/transport/transport.c",
- "src/core/lib/transport/transport_op_string.c",
- "src/core/lib/http/httpcli_security_connector.c",
- "src/core/lib/security/context/security_context.c",
- "src/core/lib/security/credentials/composite/composite_credentials.c",
- "src/core/lib/security/credentials/credentials.c",
- "src/core/lib/security/credentials/credentials_metadata.c",
- "src/core/lib/security/credentials/fake/fake_credentials.c",
- "src/core/lib/security/credentials/google_default/credentials_posix.c",
- "src/core/lib/security/credentials/google_default/credentials_windows.c",
- "src/core/lib/security/credentials/google_default/google_default_credentials.c",
- "src/core/lib/security/credentials/iam/iam_credentials.c",
- "src/core/lib/security/credentials/jwt/json_token.c",
- "src/core/lib/security/credentials/jwt/jwt_credentials.c",
- "src/core/lib/security/credentials/jwt/jwt_verifier.c",
- "src/core/lib/security/credentials/oauth2/oauth2_credentials.c",
- "src/core/lib/security/credentials/plugin/plugin_credentials.c",
- "src/core/lib/security/credentials/ssl/ssl_credentials.c",
- "src/core/lib/security/transport/client_auth_filter.c",
- "src/core/lib/security/transport/handshake.c",
- "src/core/lib/security/transport/secure_endpoint.c",
- "src/core/lib/security/transport/security_connector.c",
- "src/core/lib/security/transport/server_auth_filter.c",
- "src/core/lib/security/transport/tsi_error.c",
- "src/core/lib/security/util/b64.c",
- "src/core/lib/security/util/json_util.c",
- "src/core/lib/surface/init_secure.c",
- "src/core/ext/transport/chttp2/alpn/alpn.c",
- "src/core/lib/tsi/fake_transport_security.c",
- "src/core/lib/tsi/ssl_transport_security.c",
- "src/core/lib/tsi/transport_security.c",
"src/cpp/codegen/codegen_init.cc",
],
hdrs = [
@@ -1587,14 +1368,6 @@ cc_library(
"include/grpc/impl/codegen/sync_posix.h",
"include/grpc/impl/codegen/sync_windows.h",
"include/grpc/impl/codegen/time.h",
- "include/grpc/byte_buffer.h",
- "include/grpc/byte_buffer_reader.h",
- "include/grpc/compression.h",
- "include/grpc/grpc.h",
- "include/grpc/grpc_posix.h",
- "include/grpc/status.h",
- "include/grpc/grpc_security.h",
- "include/grpc/grpc_security_constants.h",
],
includes = [
"include",
@@ -1604,7 +1377,6 @@ cc_library(
"//external:libssl",
"//external:protobuf_clib",
":grpc",
- ":gpr",
],
)
@@ -1694,109 +1466,6 @@ cc_library(
"src/cpp/client/create_channel_internal.h",
"src/cpp/server/dynamic_thread_pool.h",
"src/cpp/server/thread_pool_interface.h",
- "src/core/lib/channel/channel_args.h",
- "src/core/lib/channel/channel_stack.h",
- "src/core/lib/channel/channel_stack_builder.h",
- "src/core/lib/channel/compress_filter.h",
- "src/core/lib/channel/connected_channel.h",
- "src/core/lib/channel/context.h",
- "src/core/lib/channel/http_client_filter.h",
- "src/core/lib/channel/http_server_filter.h",
- "src/core/lib/compression/algorithm_metadata.h",
- "src/core/lib/compression/message_compress.h",
- "src/core/lib/debug/trace.h",
- "src/core/lib/http/format_request.h",
- "src/core/lib/http/httpcli.h",
- "src/core/lib/http/parser.h",
- "src/core/lib/iomgr/closure.h",
- "src/core/lib/iomgr/endpoint.h",
- "src/core/lib/iomgr/endpoint_pair.h",
- "src/core/lib/iomgr/error.h",
- "src/core/lib/iomgr/ev_epoll_linux.h",
- "src/core/lib/iomgr/ev_poll_and_epoll_posix.h",
- "src/core/lib/iomgr/ev_poll_posix.h",
- "src/core/lib/iomgr/ev_posix.h",
- "src/core/lib/iomgr/exec_ctx.h",
- "src/core/lib/iomgr/executor.h",
- "src/core/lib/iomgr/iocp_windows.h",
- "src/core/lib/iomgr/iomgr.h",
- "src/core/lib/iomgr/iomgr_internal.h",
- "src/core/lib/iomgr/iomgr_posix.h",
- "src/core/lib/iomgr/load_file.h",
- "src/core/lib/iomgr/network_status_tracker.h",
- "src/core/lib/iomgr/polling_entity.h",
- "src/core/lib/iomgr/pollset.h",
- "src/core/lib/iomgr/pollset_set.h",
- "src/core/lib/iomgr/pollset_set_windows.h",
- "src/core/lib/iomgr/pollset_windows.h",
- "src/core/lib/iomgr/resolve_address.h",
- "src/core/lib/iomgr/sockaddr.h",
- "src/core/lib/iomgr/sockaddr_posix.h",
- "src/core/lib/iomgr/sockaddr_utils.h",
- "src/core/lib/iomgr/sockaddr_windows.h",
- "src/core/lib/iomgr/socket_utils_posix.h",
- "src/core/lib/iomgr/socket_windows.h",
- "src/core/lib/iomgr/tcp_client.h",
- "src/core/lib/iomgr/tcp_posix.h",
- "src/core/lib/iomgr/tcp_server.h",
- "src/core/lib/iomgr/tcp_windows.h",
- "src/core/lib/iomgr/time_averaged_stats.h",
- "src/core/lib/iomgr/timer.h",
- "src/core/lib/iomgr/timer_heap.h",
- "src/core/lib/iomgr/udp_server.h",
- "src/core/lib/iomgr/unix_sockets_posix.h",
- "src/core/lib/iomgr/wakeup_fd_pipe.h",
- "src/core/lib/iomgr/wakeup_fd_posix.h",
- "src/core/lib/iomgr/workqueue.h",
- "src/core/lib/iomgr/workqueue_posix.h",
- "src/core/lib/iomgr/workqueue_windows.h",
- "src/core/lib/json/json.h",
- "src/core/lib/json/json_common.h",
- "src/core/lib/json/json_reader.h",
- "src/core/lib/json/json_writer.h",
- "src/core/lib/surface/api_trace.h",
- "src/core/lib/surface/call.h",
- "src/core/lib/surface/call_test_only.h",
- "src/core/lib/surface/channel.h",
- "src/core/lib/surface/channel_init.h",
- "src/core/lib/surface/channel_stack_type.h",
- "src/core/lib/surface/completion_queue.h",
- "src/core/lib/surface/event_string.h",
- "src/core/lib/surface/init.h",
- "src/core/lib/surface/lame_client.h",
- "src/core/lib/surface/server.h",
- "src/core/lib/transport/byte_stream.h",
- "src/core/lib/transport/connectivity_state.h",
- "src/core/lib/transport/metadata.h",
- "src/core/lib/transport/metadata_batch.h",
- "src/core/lib/transport/static_metadata.h",
- "src/core/lib/transport/transport.h",
- "src/core/lib/transport/transport_impl.h",
- "src/core/lib/security/context/security_context.h",
- "src/core/lib/security/credentials/composite/composite_credentials.h",
- "src/core/lib/security/credentials/credentials.h",
- "src/core/lib/security/credentials/fake/fake_credentials.h",
- "src/core/lib/security/credentials/google_default/google_default_credentials.h",
- "src/core/lib/security/credentials/iam/iam_credentials.h",
- "src/core/lib/security/credentials/jwt/json_token.h",
- "src/core/lib/security/credentials/jwt/jwt_credentials.h",
- "src/core/lib/security/credentials/jwt/jwt_verifier.h",
- "src/core/lib/security/credentials/oauth2/oauth2_credentials.h",
- "src/core/lib/security/credentials/plugin/plugin_credentials.h",
- "src/core/lib/security/credentials/ssl/ssl_credentials.h",
- "src/core/lib/security/transport/auth_filters.h",
- "src/core/lib/security/transport/handshake.h",
- "src/core/lib/security/transport/secure_endpoint.h",
- "src/core/lib/security/transport/security_connector.h",
- "src/core/lib/security/transport/tsi_error.h",
- "src/core/lib/security/util/b64.h",
- "src/core/lib/security/util/json_util.h",
- "src/core/ext/transport/chttp2/alpn/alpn.h",
- "src/core/lib/tsi/fake_transport_security.h",
- "src/core/lib/tsi/ssl_transport_security.h",
- "src/core/lib/tsi/ssl_types.h",
- "src/core/lib/tsi/transport_security.h",
- "src/core/lib/tsi/transport_security_interface.h",
"src/cpp/common/insecure_create_auth_context.cc",
"src/cpp/client/channel.cc",
"src/cpp/client/client_context.cc",
@@ -1824,122 +1493,6 @@ cc_library(
"src/cpp/util/status.cc",
"src/cpp/util/string_ref.cc",
"src/cpp/util/time.cc",
- "src/core/lib/channel/channel_args.c",
- "src/core/lib/channel/channel_stack.c",
- "src/core/lib/channel/channel_stack_builder.c",
- "src/core/lib/channel/compress_filter.c",
- "src/core/lib/channel/connected_channel.c",
- "src/core/lib/channel/http_client_filter.c",
- "src/core/lib/channel/http_server_filter.c",
- "src/core/lib/compression/compression.c",
- "src/core/lib/compression/message_compress.c",
- "src/core/lib/debug/trace.c",
- "src/core/lib/http/format_request.c",
- "src/core/lib/http/httpcli.c",
- "src/core/lib/http/parser.c",
- "src/core/lib/iomgr/closure.c",
- "src/core/lib/iomgr/endpoint.c",
- "src/core/lib/iomgr/endpoint_pair_posix.c",
- "src/core/lib/iomgr/endpoint_pair_windows.c",
- "src/core/lib/iomgr/error.c",
- "src/core/lib/iomgr/ev_epoll_linux.c",
- "src/core/lib/iomgr/ev_poll_and_epoll_posix.c",
- "src/core/lib/iomgr/ev_poll_posix.c",
- "src/core/lib/iomgr/ev_posix.c",
- "src/core/lib/iomgr/exec_ctx.c",
- "src/core/lib/iomgr/executor.c",
- "src/core/lib/iomgr/iocp_windows.c",
- "src/core/lib/iomgr/iomgr.c",
- "src/core/lib/iomgr/iomgr_posix.c",
- "src/core/lib/iomgr/iomgr_windows.c",
- "src/core/lib/iomgr/load_file.c",
- "src/core/lib/iomgr/network_status_tracker.c",
- "src/core/lib/iomgr/polling_entity.c",
- "src/core/lib/iomgr/pollset_set_windows.c",
- "src/core/lib/iomgr/pollset_windows.c",
- "src/core/lib/iomgr/resolve_address_posix.c",
- "src/core/lib/iomgr/resolve_address_windows.c",
- "src/core/lib/iomgr/sockaddr_utils.c",
- "src/core/lib/iomgr/socket_utils_common_posix.c",
- "src/core/lib/iomgr/socket_utils_linux.c",
- "src/core/lib/iomgr/socket_utils_posix.c",
- "src/core/lib/iomgr/socket_windows.c",
- "src/core/lib/iomgr/tcp_client_posix.c",
- "src/core/lib/iomgr/tcp_client_windows.c",
- "src/core/lib/iomgr/tcp_posix.c",
- "src/core/lib/iomgr/tcp_server_posix.c",
- "src/core/lib/iomgr/tcp_server_windows.c",
- "src/core/lib/iomgr/tcp_windows.c",
- "src/core/lib/iomgr/time_averaged_stats.c",
- "src/core/lib/iomgr/timer.c",
- "src/core/lib/iomgr/timer_heap.c",
- "src/core/lib/iomgr/udp_server.c",
- "src/core/lib/iomgr/unix_sockets_posix.c",
- "src/core/lib/iomgr/unix_sockets_posix_noop.c",
- "src/core/lib/iomgr/wakeup_fd_eventfd.c",
- "src/core/lib/iomgr/wakeup_fd_nospecial.c",
- "src/core/lib/iomgr/wakeup_fd_pipe.c",
- "src/core/lib/iomgr/wakeup_fd_posix.c",
- "src/core/lib/iomgr/workqueue_posix.c",
- "src/core/lib/iomgr/workqueue_windows.c",
- "src/core/lib/json/json.c",
- "src/core/lib/json/json_reader.c",
- "src/core/lib/json/json_string.c",
- "src/core/lib/json/json_writer.c",
- "src/core/lib/surface/alarm.c",
- "src/core/lib/surface/api_trace.c",
- "src/core/lib/surface/byte_buffer.c",
- "src/core/lib/surface/byte_buffer_reader.c",
- "src/core/lib/surface/call.c",
- "src/core/lib/surface/call_details.c",
- "src/core/lib/surface/call_log_batch.c",
- "src/core/lib/surface/channel.c",
- "src/core/lib/surface/channel_init.c",
- "src/core/lib/surface/channel_ping.c",
- "src/core/lib/surface/channel_stack_type.c",
- "src/core/lib/surface/completion_queue.c",
- "src/core/lib/surface/event_string.c",
- "src/core/lib/surface/lame_client.c",
- "src/core/lib/surface/metadata_array.c",
- "src/core/lib/surface/server.c",
- "src/core/lib/surface/validate_metadata.c",
- "src/core/lib/surface/version.c",
- "src/core/lib/transport/byte_stream.c",
- "src/core/lib/transport/connectivity_state.c",
- "src/core/lib/transport/metadata.c",
- "src/core/lib/transport/metadata_batch.c",
- "src/core/lib/transport/static_metadata.c",
- "src/core/lib/transport/transport.c",
- "src/core/lib/transport/transport_op_string.c",
- "src/core/lib/http/httpcli_security_connector.c",
- "src/core/lib/security/context/security_context.c",
- "src/core/lib/security/credentials/composite/composite_credentials.c",
- "src/core/lib/security/credentials/credentials.c",
- "src/core/lib/security/credentials/credentials_metadata.c",
- "src/core/lib/security/credentials/fake/fake_credentials.c",
- "src/core/lib/security/credentials/google_default/credentials_posix.c",
- "src/core/lib/security/credentials/google_default/credentials_windows.c",
- "src/core/lib/security/credentials/google_default/google_default_credentials.c",
- "src/core/lib/security/credentials/iam/iam_credentials.c",
- "src/core/lib/security/credentials/jwt/json_token.c",
- "src/core/lib/security/credentials/jwt/jwt_credentials.c",
- "src/core/lib/security/credentials/jwt/jwt_verifier.c",
- "src/core/lib/security/credentials/oauth2/oauth2_credentials.c",
- "src/core/lib/security/credentials/plugin/plugin_credentials.c",
- "src/core/lib/security/credentials/ssl/ssl_credentials.c",
- "src/core/lib/security/transport/client_auth_filter.c",
- "src/core/lib/security/transport/handshake.c",
- "src/core/lib/security/transport/secure_endpoint.c",
- "src/core/lib/security/transport/security_connector.c",
- "src/core/lib/security/transport/server_auth_filter.c",
- "src/core/lib/security/transport/tsi_error.c",
- "src/core/lib/security/util/b64.c",
- "src/core/lib/security/util/json_util.c",
- "src/core/lib/surface/init_secure.c",
- "src/core/ext/transport/chttp2/alpn/alpn.c",
- "src/core/lib/tsi/fake_transport_security.c",
- "src/core/lib/tsi/ssl_transport_security.c",
- "src/core/lib/tsi/transport_security.c",
"src/cpp/codegen/codegen_init.cc",
],
hdrs = [
@@ -2041,14 +1594,6 @@ cc_library(
"include/grpc/impl/codegen/sync_posix.h",
"include/grpc/impl/codegen/sync_windows.h",
"include/grpc/impl/codegen/time.h",
- "include/grpc/byte_buffer.h",
- "include/grpc/byte_buffer_reader.h",
- "include/grpc/compression.h",
- "include/grpc/grpc.h",
- "include/grpc/grpc_posix.h",
- "include/grpc/status.h",
- "include/grpc/grpc_security.h",
- "include/grpc/grpc_security_constants.h",
],
includes = [
"include",
@@ -2058,6 +1603,7 @@ cc_library(
"//external:protobuf_clib",
":gpr",
":grpc_unsecure",
+ ":grpc",
],
)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index fb8a199ad5..2c0059cd2d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -42,7 +42,7 @@
cmake_minimum_required(VERSION 2.8)
set(PACKAGE_NAME "grpc")
-set(PACKAGE_VERSION "0.16.0-dev")
+set(PACKAGE_VERSION "1.1.0-dev")
set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
set(PACKAGE_TARNAME "${PACKAGE_NAME}-${PACKAGE_VERSION}")
set(PACKAGE_BUGREPORT "https://github.com/grpc/grpc/issues/")
@@ -725,122 +725,6 @@ add_library(grpc++
src/cpp/util/status.cc
src/cpp/util/string_ref.cc
src/cpp/util/time.cc
- src/core/lib/channel/channel_args.c
- src/core/lib/channel/channel_stack.c
- src/core/lib/channel/channel_stack_builder.c
- src/core/lib/channel/compress_filter.c
- src/core/lib/channel/connected_channel.c
- src/core/lib/channel/http_client_filter.c
- src/core/lib/channel/http_server_filter.c
- src/core/lib/compression/compression.c
- src/core/lib/compression/message_compress.c
- src/core/lib/debug/trace.c
- src/core/lib/http/format_request.c
- src/core/lib/http/httpcli.c
- src/core/lib/http/parser.c
- src/core/lib/iomgr/closure.c
- src/core/lib/iomgr/endpoint.c
- src/core/lib/iomgr/endpoint_pair_posix.c
- src/core/lib/iomgr/endpoint_pair_windows.c
- src/core/lib/iomgr/error.c
- src/core/lib/iomgr/ev_epoll_linux.c
- src/core/lib/iomgr/ev_poll_and_epoll_posix.c
- src/core/lib/iomgr/ev_poll_posix.c
- src/core/lib/iomgr/ev_posix.c
- src/core/lib/iomgr/exec_ctx.c
- src/core/lib/iomgr/executor.c
- src/core/lib/iomgr/iocp_windows.c
- src/core/lib/iomgr/iomgr.c
- src/core/lib/iomgr/iomgr_posix.c
- src/core/lib/iomgr/iomgr_windows.c
- src/core/lib/iomgr/load_file.c
- src/core/lib/iomgr/network_status_tracker.c
- src/core/lib/iomgr/polling_entity.c
- src/core/lib/iomgr/pollset_set_windows.c
- src/core/lib/iomgr/pollset_windows.c
- src/core/lib/iomgr/resolve_address_posix.c
- src/core/lib/iomgr/resolve_address_windows.c
- src/core/lib/iomgr/sockaddr_utils.c
- src/core/lib/iomgr/socket_utils_common_posix.c
- src/core/lib/iomgr/socket_utils_linux.c
- src/core/lib/iomgr/socket_utils_posix.c
- src/core/lib/iomgr/socket_windows.c
- src/core/lib/iomgr/tcp_client_posix.c
- src/core/lib/iomgr/tcp_client_windows.c
- src/core/lib/iomgr/tcp_posix.c
- src/core/lib/iomgr/tcp_server_posix.c
- src/core/lib/iomgr/tcp_server_windows.c
- src/core/lib/iomgr/tcp_windows.c
- src/core/lib/iomgr/time_averaged_stats.c
- src/core/lib/iomgr/timer.c
- src/core/lib/iomgr/timer_heap.c
- src/core/lib/iomgr/udp_server.c
- src/core/lib/iomgr/unix_sockets_posix.c
- src/core/lib/iomgr/unix_sockets_posix_noop.c
- src/core/lib/iomgr/wakeup_fd_eventfd.c
- src/core/lib/iomgr/wakeup_fd_nospecial.c
- src/core/lib/iomgr/wakeup_fd_pipe.c
- src/core/lib/iomgr/wakeup_fd_posix.c
- src/core/lib/iomgr/workqueue_posix.c
- src/core/lib/iomgr/workqueue_windows.c
- src/core/lib/json/json.c
- src/core/lib/json/json_reader.c
- src/core/lib/json/json_string.c
- src/core/lib/json/json_writer.c
- src/core/lib/surface/alarm.c
- src/core/lib/surface/api_trace.c
- src/core/lib/surface/byte_buffer.c
- src/core/lib/surface/byte_buffer_reader.c
- src/core/lib/surface/call.c
- src/core/lib/surface/call_details.c
- src/core/lib/surface/call_log_batch.c
- src/core/lib/surface/channel.c
- src/core/lib/surface/channel_init.c
- src/core/lib/surface/channel_ping.c
- src/core/lib/surface/channel_stack_type.c
- src/core/lib/surface/completion_queue.c
- src/core/lib/surface/event_string.c
- src/core/lib/surface/lame_client.c
- src/core/lib/surface/metadata_array.c
- src/core/lib/surface/server.c
- src/core/lib/surface/validate_metadata.c
- src/core/lib/surface/version.c
- src/core/lib/transport/byte_stream.c
- src/core/lib/transport/connectivity_state.c
- src/core/lib/transport/metadata.c
- src/core/lib/transport/metadata_batch.c
- src/core/lib/transport/static_metadata.c
- src/core/lib/transport/transport.c
- src/core/lib/transport/transport_op_string.c
- src/core/lib/http/httpcli_security_connector.c
- src/core/lib/security/context/security_context.c
- src/core/lib/security/credentials/composite/composite_credentials.c
- src/core/lib/security/credentials/credentials.c
- src/core/lib/security/credentials/credentials_metadata.c
- src/core/lib/security/credentials/fake/fake_credentials.c
- src/core/lib/security/credentials/google_default/credentials_posix.c
- src/core/lib/security/credentials/google_default/credentials_windows.c
- src/core/lib/security/credentials/google_default/google_default_credentials.c
- src/core/lib/security/credentials/iam/iam_credentials.c
- src/core/lib/security/credentials/jwt/json_token.c
- src/core/lib/security/credentials/jwt/jwt_credentials.c
- src/core/lib/security/credentials/jwt/jwt_verifier.c
- src/core/lib/security/credentials/oauth2/oauth2_credentials.c
- src/core/lib/security/credentials/plugin/plugin_credentials.c
- src/core/lib/security/credentials/ssl/ssl_credentials.c
- src/core/lib/security/transport/client_auth_filter.c
- src/core/lib/security/transport/handshake.c
- src/core/lib/security/transport/secure_endpoint.c
- src/core/lib/security/transport/security_connector.c
- src/core/lib/security/transport/server_auth_filter.c
- src/core/lib/security/transport/tsi_error.c
- src/core/lib/security/util/b64.c
- src/core/lib/security/util/json_util.c
- src/core/lib/surface/init_secure.c
- src/core/ext/transport/chttp2/alpn/alpn.c
- src/core/lib/tsi/fake_transport_security.c
- src/core/lib/tsi/ssl_transport_security.c
- src/core/lib/tsi/transport_security.c
src/cpp/codegen/codegen_init.cc
)
@@ -857,7 +741,6 @@ target_link_libraries(grpc++
ssl
libprotobuf
grpc
- gpr
)
@@ -910,122 +793,6 @@ add_library(grpc++_unsecure
src/cpp/util/status.cc
src/cpp/util/string_ref.cc
src/cpp/util/time.cc
- src/core/lib/channel/channel_args.c
- src/core/lib/channel/channel_stack.c
- src/core/lib/channel/channel_stack_builder.c
- src/core/lib/channel/compress_filter.c
- src/core/lib/channel/connected_channel.c
- src/core/lib/channel/http_client_filter.c
- src/core/lib/channel/http_server_filter.c
- src/core/lib/compression/compression.c
- src/core/lib/compression/message_compress.c
- src/core/lib/debug/trace.c
- src/core/lib/http/format_request.c
- src/core/lib/http/httpcli.c
- src/core/lib/http/parser.c
- src/core/lib/iomgr/closure.c
- src/core/lib/iomgr/endpoint.c
- src/core/lib/iomgr/endpoint_pair_posix.c
- src/core/lib/iomgr/endpoint_pair_windows.c
- src/core/lib/iomgr/error.c
- src/core/lib/iomgr/ev_epoll_linux.c
- src/core/lib/iomgr/ev_poll_and_epoll_posix.c
- src/core/lib/iomgr/ev_poll_posix.c
- src/core/lib/iomgr/ev_posix.c
- src/core/lib/iomgr/exec_ctx.c
- src/core/lib/iomgr/executor.c
- src/core/lib/iomgr/iocp_windows.c
- src/core/lib/iomgr/iomgr.c
- src/core/lib/iomgr/iomgr_posix.c
- src/core/lib/iomgr/iomgr_windows.c
- src/core/lib/iomgr/load_file.c
- src/core/lib/iomgr/network_status_tracker.c
- src/core/lib/iomgr/polling_entity.c
- src/core/lib/iomgr/pollset_set_windows.c
- src/core/lib/iomgr/pollset_windows.c
- src/core/lib/iomgr/resolve_address_posix.c
- src/core/lib/iomgr/resolve_address_windows.c
- src/core/lib/iomgr/sockaddr_utils.c
- src/core/lib/iomgr/socket_utils_common_posix.c
- src/core/lib/iomgr/socket_utils_linux.c
- src/core/lib/iomgr/socket_utils_posix.c
- src/core/lib/iomgr/socket_windows.c
- src/core/lib/iomgr/tcp_client_posix.c
- src/core/lib/iomgr/tcp_client_windows.c
- src/core/lib/iomgr/tcp_posix.c
- src/core/lib/iomgr/tcp_server_posix.c
- src/core/lib/iomgr/tcp_server_windows.c
- src/core/lib/iomgr/tcp_windows.c
- src/core/lib/iomgr/time_averaged_stats.c
- src/core/lib/iomgr/timer.c
- src/core/lib/iomgr/timer_heap.c
- src/core/lib/iomgr/udp_server.c
- src/core/lib/iomgr/unix_sockets_posix.c
- src/core/lib/iomgr/unix_sockets_posix_noop.c
- src/core/lib/iomgr/wakeup_fd_eventfd.c
- src/core/lib/iomgr/wakeup_fd_nospecial.c
- src/core/lib/iomgr/wakeup_fd_pipe.c
- src/core/lib/iomgr/wakeup_fd_posix.c
- src/core/lib/iomgr/workqueue_posix.c
- src/core/lib/iomgr/workqueue_windows.c
- src/core/lib/json/json.c
- src/core/lib/json/json_reader.c
- src/core/lib/json/json_string.c
- src/core/lib/json/json_writer.c
- src/core/lib/surface/alarm.c
- src/core/lib/surface/api_trace.c
- src/core/lib/surface/byte_buffer.c
- src/core/lib/surface/byte_buffer_reader.c
- src/core/lib/surface/call.c
- src/core/lib/surface/call_details.c
- src/core/lib/surface/call_log_batch.c
- src/core/lib/surface/channel.c
- src/core/lib/surface/channel_init.c
- src/core/lib/surface/channel_ping.c
- src/core/lib/surface/channel_stack_type.c
- src/core/lib/surface/completion_queue.c
- src/core/lib/surface/event_string.c
- src/core/lib/surface/lame_client.c
- src/core/lib/surface/metadata_array.c
- src/core/lib/surface/server.c
- src/core/lib/surface/validate_metadata.c
- src/core/lib/surface/version.c
- src/core/lib/transport/byte_stream.c
- src/core/lib/transport/connectivity_state.c
- src/core/lib/transport/metadata.c
- src/core/lib/transport/metadata_batch.c
- src/core/lib/transport/static_metadata.c
- src/core/lib/transport/transport.c
- src/core/lib/transport/transport_op_string.c
- src/core/lib/http/httpcli_security_connector.c
- src/core/lib/security/context/security_context.c
- src/core/lib/security/credentials/composite/composite_credentials.c
- src/core/lib/security/credentials/credentials.c
- src/core/lib/security/credentials/credentials_metadata.c
- src/core/lib/security/credentials/fake/fake_credentials.c
- src/core/lib/security/credentials/google_default/credentials_posix.c
- src/core/lib/security/credentials/google_default/credentials_windows.c
- src/core/lib/security/credentials/google_default/google_default_credentials.c
- src/core/lib/security/credentials/iam/iam_credentials.c
- src/core/lib/security/credentials/jwt/json_token.c
- src/core/lib/security/credentials/jwt/jwt_credentials.c
- src/core/lib/security/credentials/jwt/jwt_verifier.c
- src/core/lib/security/credentials/oauth2/oauth2_credentials.c
- src/core/lib/security/credentials/plugin/plugin_credentials.c
- src/core/lib/security/credentials/ssl/ssl_credentials.c
- src/core/lib/security/transport/client_auth_filter.c
- src/core/lib/security/transport/handshake.c
- src/core/lib/security/transport/secure_endpoint.c
- src/core/lib/security/transport/security_connector.c
- src/core/lib/security/transport/server_auth_filter.c
- src/core/lib/security/transport/tsi_error.c
- src/core/lib/security/util/b64.c
- src/core/lib/security/util/json_util.c
- src/core/lib/surface/init_secure.c
- src/core/ext/transport/chttp2/alpn/alpn.c
- src/core/lib/tsi/fake_transport_security.c
- src/core/lib/tsi/ssl_transport_security.c
- src/core/lib/tsi/transport_security.c
src/cpp/codegen/codegen_init.cc
)
@@ -1042,6 +809,7 @@ target_link_libraries(grpc++_unsecure
libprotobuf
gpr
grpc_unsecure
+ grpc
)
diff --git a/Makefile b/Makefile
index 51f5c5e44c..4ce22678d7 100644
--- a/Makefile
+++ b/Makefile
@@ -415,7 +415,7 @@ E = @echo
Q = @
endif
-VERSION = 0.16.0-dev
+VERSION = 1.1.0-dev
CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES))
CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS)
@@ -448,7 +448,7 @@ PC_TEMPLATE = prefix=$(prefix),exec_prefix=\$${prefix},includedir=\$${prefix}/in
ifeq ($(SYSTEM),MINGW32)
SHARED_EXT = dll
SHARED_PREFIX =
-SHARED_VERSION = -0
+SHARED_VERSION = -1
else ifeq ($(SYSTEM),Darwin)
SHARED_EXT = dylib
SHARED_PREFIX = lib
@@ -991,7 +991,6 @@ transport_security_test: $(BINDIR)/$(CONFIG)/transport_security_test
udp_server_test: $(BINDIR)/$(CONFIG)/udp_server_test
uri_fuzzer_test: $(BINDIR)/$(CONFIG)/uri_fuzzer_test
uri_parser_test: $(BINDIR)/$(CONFIG)/uri_parser_test
-workqueue_test: $(BINDIR)/$(CONFIG)/workqueue_test
alarm_cpp_test: $(BINDIR)/$(CONFIG)/alarm_cpp_test
async_end2end_test: $(BINDIR)/$(CONFIG)/async_end2end_test
auth_property_iterator_test: $(BINDIR)/$(CONFIG)/auth_property_iterator_test
@@ -1295,7 +1294,6 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/transport_security_test \
$(BINDIR)/$(CONFIG)/udp_server_test \
$(BINDIR)/$(CONFIG)/uri_parser_test \
- $(BINDIR)/$(CONFIG)/workqueue_test \
$(BINDIR)/$(CONFIG)/public_headers_must_be_c89 \
$(BINDIR)/$(CONFIG)/badreq_bad_client_test \
$(BINDIR)/$(CONFIG)/connection_prefix_bad_client_test \
@@ -1674,8 +1672,6 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/udp_server_test || ( echo test udp_server_test failed ; exit 1 )
$(E) "[RUN] Testing uri_parser_test"
$(Q) $(BINDIR)/$(CONFIG)/uri_parser_test || ( echo test uri_parser_test failed ; exit 1 )
- $(E) "[RUN] Testing workqueue_test"
- $(Q) $(BINDIR)/$(CONFIG)/workqueue_test || ( echo test workqueue_test failed ; exit 1 )
$(E) "[RUN] Testing public_headers_must_be_c89"
$(Q) $(BINDIR)/$(CONFIG)/public_headers_must_be_c89 || ( echo test public_headers_must_be_c89 failed ; exit 1 )
$(E) "[RUN] Testing badreq_bad_client_test"
@@ -2164,7 +2160,7 @@ install-shared_c: shared_c strip-shared_c install-pkg-config_c
ifeq ($(SYSTEM),MINGW32)
$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgpr-imp.a $(prefix)/lib/libgpr-imp.a
else ifneq ($(SYSTEM),Darwin)
- $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgpr.so.0
+ $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgpr.so.1
$(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgpr.so
endif
$(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT)"
@@ -2173,7 +2169,7 @@ endif
ifeq ($(SYSTEM),MINGW32)
$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc-imp.a $(prefix)/lib/libgrpc-imp.a
else ifneq ($(SYSTEM),Darwin)
- $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc.so.0
+ $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc.so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc.so
endif
$(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT)"
@@ -2182,7 +2178,7 @@ endif
ifeq ($(SYSTEM),MINGW32)
$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_cronet-imp.a $(prefix)/lib/libgrpc_cronet-imp.a
else ifneq ($(SYSTEM),Darwin)
- $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_cronet.so.0
+ $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_cronet.so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_cronet.so
endif
$(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT)"
@@ -2191,7 +2187,7 @@ endif
ifeq ($(SYSTEM),MINGW32)
$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure-imp.a $(prefix)/lib/libgrpc_unsecure-imp.a
else ifneq ($(SYSTEM),Darwin)
- $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_unsecure.so.0
+ $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_unsecure.so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_unsecure.so
endif
ifneq ($(SYSTEM),MINGW32)
@@ -2208,7 +2204,7 @@ install-shared_cxx: shared_cxx strip-shared_cxx install-shared_c install-pkg-con
ifeq ($(SYSTEM),MINGW32)
$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++-imp.a $(prefix)/lib/libgrpc++-imp.a
else ifneq ($(SYSTEM),Darwin)
- $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++.so.0
+ $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++.so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++.so
endif
$(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT)"
@@ -2217,7 +2213,7 @@ endif
ifeq ($(SYSTEM),MINGW32)
$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection-imp.a $(prefix)/lib/libgrpc++_reflection-imp.a
else ifneq ($(SYSTEM),Darwin)
- $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_reflection.so.0
+ $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_reflection.so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_reflection.so
endif
$(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT)"
@@ -2226,7 +2222,7 @@ endif
ifeq ($(SYSTEM),MINGW32)
$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure-imp.a $(prefix)/lib/libgrpc++_unsecure-imp.a
else ifneq ($(SYSTEM),Darwin)
- $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_unsecure.so.0
+ $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_unsecure.so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_unsecure.so
endif
ifneq ($(SYSTEM),MINGW32)
@@ -2243,7 +2239,7 @@ install-shared_csharp: shared_csharp strip-shared_csharp
ifeq ($(SYSTEM),MINGW32)
$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext-imp.a $(prefix)/lib/libgrpc_csharp_ext-imp.a
else ifneq ($(SYSTEM),Darwin)
- $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_csharp_ext.so.0
+ $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_csharp_ext.so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_csharp_ext.so
endif
ifneq ($(SYSTEM),MINGW32)
@@ -2436,8 +2432,8 @@ $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT): $(LIBGPR_OBJS) $(ZLI
ifeq ($(SYSTEM),Darwin)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS)
else
- $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgpr.so.0 -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS)
- $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).so.0
+ $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgpr.so.1 -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS)
+ $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).so.1
$(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).so
endif
endif
@@ -2729,8 +2725,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC_OBJS) $(Z
ifeq ($(SYSTEM),Darwin)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
else
- $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
- $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).so.0
+ $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
+ $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).so
endif
endif
@@ -2978,8 +2974,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC_CRO
ifeq ($(SYSTEM),Darwin)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
else
- $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_cronet.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
- $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).so.0
+ $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_cronet.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
+ $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).so
endif
endif
@@ -3299,8 +3295,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC_U
ifeq ($(SYSTEM),Darwin)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
else
- $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_unsecure.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
- $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).so.0
+ $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_unsecure.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
+ $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).so
endif
endif
@@ -3421,122 +3417,6 @@ LIBGRPC++_SRC = \
src/cpp/util/status.cc \
src/cpp/util/string_ref.cc \
src/cpp/util/time.cc \
- src/core/lib/channel/channel_args.c \
- src/core/lib/channel/channel_stack.c \
- src/core/lib/channel/channel_stack_builder.c \
- src/core/lib/channel/compress_filter.c \
- src/core/lib/channel/connected_channel.c \
- src/core/lib/channel/http_client_filter.c \
- src/core/lib/channel/http_server_filter.c \
- src/core/lib/compression/compression.c \
- src/core/lib/compression/message_compress.c \
- src/core/lib/debug/trace.c \
- src/core/lib/http/format_request.c \
- src/core/lib/http/httpcli.c \
- src/core/lib/http/parser.c \
- src/core/lib/iomgr/closure.c \
- src/core/lib/iomgr/endpoint.c \
- src/core/lib/iomgr/endpoint_pair_posix.c \
- src/core/lib/iomgr/endpoint_pair_windows.c \
- src/core/lib/iomgr/error.c \
- src/core/lib/iomgr/ev_epoll_linux.c \
- src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
- src/core/lib/iomgr/ev_poll_posix.c \
- src/core/lib/iomgr/ev_posix.c \
- src/core/lib/iomgr/exec_ctx.c \
- src/core/lib/iomgr/executor.c \
- src/core/lib/iomgr/iocp_windows.c \
- src/core/lib/iomgr/iomgr.c \
- src/core/lib/iomgr/iomgr_posix.c \
- src/core/lib/iomgr/iomgr_windows.c \
- src/core/lib/iomgr/load_file.c \
- src/core/lib/iomgr/network_status_tracker.c \
- src/core/lib/iomgr/polling_entity.c \
- src/core/lib/iomgr/pollset_set_windows.c \
- src/core/lib/iomgr/pollset_windows.c \
- src/core/lib/iomgr/resolve_address_posix.c \
- src/core/lib/iomgr/resolve_address_windows.c \
- src/core/lib/iomgr/sockaddr_utils.c \
- src/core/lib/iomgr/socket_utils_common_posix.c \
- src/core/lib/iomgr/socket_utils_linux.c \
- src/core/lib/iomgr/socket_utils_posix.c \
- src/core/lib/iomgr/socket_windows.c \
- src/core/lib/iomgr/tcp_client_posix.c \
- src/core/lib/iomgr/tcp_client_windows.c \
- src/core/lib/iomgr/tcp_posix.c \
- src/core/lib/iomgr/tcp_server_posix.c \
- src/core/lib/iomgr/tcp_server_windows.c \
- src/core/lib/iomgr/tcp_windows.c \
- src/core/lib/iomgr/time_averaged_stats.c \
- src/core/lib/iomgr/timer.c \
- src/core/lib/iomgr/timer_heap.c \
- src/core/lib/iomgr/udp_server.c \
- src/core/lib/iomgr/unix_sockets_posix.c \
- src/core/lib/iomgr/unix_sockets_posix_noop.c \
- src/core/lib/iomgr/wakeup_fd_eventfd.c \
- src/core/lib/iomgr/wakeup_fd_nospecial.c \
- src/core/lib/iomgr/wakeup_fd_pipe.c \
- src/core/lib/iomgr/wakeup_fd_posix.c \
- src/core/lib/iomgr/workqueue_posix.c \
- src/core/lib/iomgr/workqueue_windows.c \
- src/core/lib/json/json.c \
- src/core/lib/json/json_reader.c \
- src/core/lib/json/json_string.c \
- src/core/lib/json/json_writer.c \
- src/core/lib/surface/alarm.c \
- src/core/lib/surface/api_trace.c \
- src/core/lib/surface/byte_buffer.c \
- src/core/lib/surface/byte_buffer_reader.c \
- src/core/lib/surface/call.c \
- src/core/lib/surface/call_details.c \
- src/core/lib/surface/call_log_batch.c \
- src/core/lib/surface/channel.c \
- src/core/lib/surface/channel_init.c \
- src/core/lib/surface/channel_ping.c \
- src/core/lib/surface/channel_stack_type.c \
- src/core/lib/surface/completion_queue.c \
- src/core/lib/surface/event_string.c \
- src/core/lib/surface/lame_client.c \
- src/core/lib/surface/metadata_array.c \
- src/core/lib/surface/server.c \
- src/core/lib/surface/validate_metadata.c \
- src/core/lib/surface/version.c \
- src/core/lib/transport/byte_stream.c \
- src/core/lib/transport/connectivity_state.c \
- src/core/lib/transport/metadata.c \
- src/core/lib/transport/metadata_batch.c \
- src/core/lib/transport/static_metadata.c \
- src/core/lib/transport/transport.c \
- src/core/lib/transport/transport_op_string.c \
- src/core/lib/http/httpcli_security_connector.c \
- src/core/lib/security/context/security_context.c \
- src/core/lib/security/credentials/composite/composite_credentials.c \
- src/core/lib/security/credentials/credentials.c \
- src/core/lib/security/credentials/credentials_metadata.c \
- src/core/lib/security/credentials/fake/fake_credentials.c \
- src/core/lib/security/credentials/google_default/credentials_posix.c \
- src/core/lib/security/credentials/google_default/credentials_windows.c \
- src/core/lib/security/credentials/google_default/google_default_credentials.c \
- src/core/lib/security/credentials/iam/iam_credentials.c \
- src/core/lib/security/credentials/jwt/json_token.c \
- src/core/lib/security/credentials/jwt/jwt_credentials.c \
- src/core/lib/security/credentials/jwt/jwt_verifier.c \
- src/core/lib/security/credentials/oauth2/oauth2_credentials.c \
- src/core/lib/security/credentials/plugin/plugin_credentials.c \
- src/core/lib/security/credentials/ssl/ssl_credentials.c \
- src/core/lib/security/transport/client_auth_filter.c \
- src/core/lib/security/transport/handshake.c \
- src/core/lib/security/transport/secure_endpoint.c \
- src/core/lib/security/transport/security_connector.c \
- src/core/lib/security/transport/server_auth_filter.c \
- src/core/lib/security/transport/tsi_error.c \
- src/core/lib/security/util/b64.c \
- src/core/lib/security/util/json_util.c \
- src/core/lib/surface/init_secure.c \
- src/core/ext/transport/chttp2/alpn/alpn.c \
- src/core/lib/tsi/fake_transport_security.c \
- src/core/lib/tsi/ssl_transport_security.c \
- src/core/lib/tsi/transport_security.c \
src/cpp/codegen/codegen_init.cc \
PUBLIC_HEADERS_CXX += \
@@ -3638,14 +3518,6 @@ PUBLIC_HEADERS_CXX += \
include/grpc/impl/codegen/sync_posix.h \
include/grpc/impl/codegen/sync_windows.h \
include/grpc/impl/codegen/time.h \
- include/grpc/byte_buffer.h \
- include/grpc/byte_buffer_reader.h \
- include/grpc/compression.h \
- include/grpc/grpc.h \
- include/grpc/grpc_posix.h \
- include/grpc/status.h \
- include/grpc/grpc_security.h \
- include/grpc/grpc_security_constants.h \
LIBGRPC++_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_SRC))))
@@ -3682,19 +3554,19 @@ endif
ifeq ($(SYSTEM),MINGW32)
-$(LIBDIR)/$(CONFIG)/grpc++$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/grpc.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/gpr.$(SHARED_EXT) $(OPENSSL_DEP)
+$(LIBDIR)/$(CONFIG)/grpc++$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/grpc.$(SHARED_EXT) $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
- $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared grpc++.def -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++$(SHARED_VERSION).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc-imp -lgpr-imp
+ $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared grpc++.def -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++$(SHARED_VERSION).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc-imp
else
-$(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/libgrpc.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgpr.$(SHARED_EXT) $(OPENSSL_DEP)
+$(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/libgrpc.$(SHARED_EXT) $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
ifeq ($(SYSTEM),Darwin)
- $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc -lgpr
+ $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc
else
- $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc -lgpr
- $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).so.0
+ $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc
+ $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).so
endif
endif
@@ -3820,8 +3692,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT): $(LIBGR
ifeq ($(SYSTEM),Darwin)
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++
else
- $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_reflection.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++
- $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).so.0
+ $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_reflection.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++
+ $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).so
endif
endif
@@ -4032,122 +3904,6 @@ LIBGRPC++_UNSECURE_SRC = \
src/cpp/util/status.cc \
src/cpp/util/string_ref.cc \
src/cpp/util/time.cc \
- src/core/lib/channel/channel_args.c \
- src/core/lib/channel/channel_stack.c \
- src/core/lib/channel/channel_stack_builder.c \
- src/core/lib/channel/compress_filter.c \
- src/core/lib/channel/connected_channel.c \
- src/core/lib/channel/http_client_filter.c \
- src/core/lib/channel/http_server_filter.c \
- src/core/lib/compression/compression.c \
- src/core/lib/compression/message_compress.c \
- src/core/lib/debug/trace.c \
- src/core/lib/http/format_request.c \
- src/core/lib/http/httpcli.c \
- src/core/lib/http/parser.c \
- src/core/lib/iomgr/closure.c \
- src/core/lib/iomgr/endpoint.c \
- src/core/lib/iomgr/endpoint_pair_posix.c \
- src/core/lib/iomgr/endpoint_pair_windows.c \
- src/core/lib/iomgr/error.c \
- src/core/lib/iomgr/ev_epoll_linux.c \
- src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
- src/core/lib/iomgr/ev_poll_posix.c \
- src/core/lib/iomgr/ev_posix.c \
- src/core/lib/iomgr/exec_ctx.c \
- src/core/lib/iomgr/executor.c \
- src/core/lib/iomgr/iocp_windows.c \
- src/core/lib/iomgr/iomgr.c \
- src/core/lib/iomgr/iomgr_posix.c \
- src/core/lib/iomgr/iomgr_windows.c \
- src/core/lib/iomgr/load_file.c \
- src/core/lib/iomgr/network_status_tracker.c \
- src/core/lib/iomgr/polling_entity.c \
- src/core/lib/iomgr/pollset_set_windows.c \
- src/core/lib/iomgr/pollset_windows.c \
- src/core/lib/iomgr/resolve_address_posix.c \
- src/core/lib/iomgr/resolve_address_windows.c \
- src/core/lib/iomgr/sockaddr_utils.c \
- src/core/lib/iomgr/socket_utils_common_posix.c \
- src/core/lib/iomgr/socket_utils_linux.c \
- src/core/lib/iomgr/socket_utils_posix.c \
- src/core/lib/iomgr/socket_windows.c \
- src/core/lib/iomgr/tcp_client_posix.c \
- src/core/lib/iomgr/tcp_client_windows.c \
- src/core/lib/iomgr/tcp_posix.c \
- src/core/lib/iomgr/tcp_server_posix.c \
- src/core/lib/iomgr/tcp_server_windows.c \
- src/core/lib/iomgr/tcp_windows.c \
- src/core/lib/iomgr/time_averaged_stats.c \
- src/core/lib/iomgr/timer.c \
- src/core/lib/iomgr/timer_heap.c \
- src/core/lib/iomgr/udp_server.c \
- src/core/lib/iomgr/unix_sockets_posix.c \
- src/core/lib/iomgr/unix_sockets_posix_noop.c \
- src/core/lib/iomgr/wakeup_fd_eventfd.c \
- src/core/lib/iomgr/wakeup_fd_nospecial.c \
- src/core/lib/iomgr/wakeup_fd_pipe.c \
- src/core/lib/iomgr/wakeup_fd_posix.c \
- src/core/lib/iomgr/workqueue_posix.c \
- src/core/lib/iomgr/workqueue_windows.c \
- src/core/lib/json/json.c \
- src/core/lib/json/json_reader.c \
- src/core/lib/json/json_string.c \
- src/core/lib/json/json_writer.c \
- src/core/lib/surface/alarm.c \
- src/core/lib/surface/api_trace.c \
- src/core/lib/surface/byte_buffer.c \
- src/core/lib/surface/byte_buffer_reader.c \
- src/core/lib/surface/call.c \
- src/core/lib/surface/call_details.c \
- src/core/lib/surface/call_log_batch.c \
- src/core/lib/surface/channel.c \
- src/core/lib/surface/channel_init.c \
- src/core/lib/surface/channel_ping.c \
- src/core/lib/surface/channel_stack_type.c \
- src/core/lib/surface/completion_queue.c \
- src/core/lib/surface/event_string.c \
- src/core/lib/surface/lame_client.c \
- src/core/lib/surface/metadata_array.c \
- src/core/lib/surface/server.c \
- src/core/lib/surface/validate_metadata.c \
- src/core/lib/surface/version.c \
- src/core/lib/transport/byte_stream.c \
- src/core/lib/transport/connectivity_state.c \
- src/core/lib/transport/metadata.c \
- src/core/lib/transport/metadata_batch.c \
- src/core/lib/transport/static_metadata.c \
- src/core/lib/transport/transport.c \
- src/core/lib/transport/transport_op_string.c \
- src/core/lib/http/httpcli_security_connector.c \
- src/core/lib/security/context/security_context.c \
- src/core/lib/security/credentials/composite/composite_credentials.c \
- src/core/lib/security/credentials/credentials.c \
- src/core/lib/security/credentials/credentials_metadata.c \
- src/core/lib/security/credentials/fake/fake_credentials.c \
- src/core/lib/security/credentials/google_default/credentials_posix.c \
- src/core/lib/security/credentials/google_default/credentials_windows.c \
- src/core/lib/security/credentials/google_default/google_default_credentials.c \
- src/core/lib/security/credentials/iam/iam_credentials.c \
- src/core/lib/security/credentials/jwt/json_token.c \
- src/core/lib/security/credentials/jwt/jwt_credentials.c \
- src/core/lib/security/credentials/jwt/jwt_verifier.c \
- src/core/lib/security/credentials/oauth2/oauth2_credentials.c \
- src/core/lib/security/credentials/plugin/plugin_credentials.c \
- src/core/lib/security/credentials/ssl/ssl_credentials.c \
- src/core/lib/security/transport/client_auth_filter.c \
- src/core/lib/security/transport/handshake.c \
- src/core/lib/security/transport/secure_endpoint.c \
- src/core/lib/security/transport/security_connector.c \
- src/core/lib/security/transport/server_auth_filter.c \
- src/core/lib/security/transport/tsi_error.c \
- src/core/lib/security/util/b64.c \
- src/core/lib/security/util/json_util.c \
- src/core/lib/surface/init_secure.c \
- src/core/ext/transport/chttp2/alpn/alpn.c \
- src/core/lib/tsi/fake_transport_security.c \
- src/core/lib/tsi/ssl_transport_security.c \
- src/core/lib/tsi/transport_security.c \
src/cpp/codegen/codegen_init.cc \
PUBLIC_HEADERS_CXX += \
@@ -4249,14 +4005,6 @@ PUBLIC_HEADERS_CXX += \
include/grpc/impl/codegen/sync_posix.h \
include/grpc/impl/codegen/sync_windows.h \
include/grpc/impl/codegen/time.h \
- include/grpc/byte_buffer.h \
- include/grpc/byte_buffer_reader.h \
- include/grpc/compression.h \
- include/grpc/grpc.h \
- include/grpc/grpc_posix.h \
- include/grpc/status.h \
- include/grpc/grpc_security.h \
- include/grpc/grpc_security_constants.h \
LIBGRPC++_UNSECURE_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_UNSECURE_SRC))))
@@ -4283,19 +4031,19 @@ endif
ifeq ($(SYSTEM),MINGW32)
-$(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_UNSECURE_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/gpr.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/grpc_unsecure.$(SHARED_EXT)
+$(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_UNSECURE_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/gpr.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/grpc_unsecure.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/grpc.$(SHARED_EXT)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
- $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared grpc++_unsecure.def -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr-imp -lgrpc_unsecure-imp
+ $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared grpc++_unsecure.def -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr-imp -lgrpc_unsecure-imp -lgrpc-imp
else
-$(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_UNSECURE_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/libgpr.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.$(SHARED_EXT)
+$(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_UNSECURE_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/libgpr.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc.$(SHARED_EXT)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
ifeq ($(SYSTEM),Darwin)
- $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure
+ $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure -lgrpc
else
- $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_unsecure.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure
- $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).so.0
+ $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_unsecure.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure -lgrpc
+ $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).so
endif
endif
@@ -4724,8 +4472,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC
ifeq ($(SYSTEM),Darwin)
$(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
else
- $(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_csharp_ext.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
- $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).so.0
+ $(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_csharp_ext.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
+ $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).so
endif
endif
@@ -10423,38 +10171,6 @@ endif
endif
-WORKQUEUE_TEST_SRC = \
- test/core/iomgr/workqueue_test.c \
-
-WORKQUEUE_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(WORKQUEUE_TEST_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
-
-$(BINDIR)/$(CONFIG)/workqueue_test: openssl_dep_error
-
-else
-
-
-
-$(BINDIR)/$(CONFIG)/workqueue_test: $(WORKQUEUE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
- $(E) "[LD] Linking $@"
- $(Q) mkdir -p `dirname $@`
- $(Q) $(LD) $(LDFLAGS) $(WORKQUEUE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/workqueue_test
-
-endif
-
-$(OBJDIR)/$(CONFIG)/test/core/iomgr/workqueue_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
-
-deps_workqueue_test: $(WORKQUEUE_TEST_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(WORKQUEUE_TEST_OBJS:.o=.dep)
-endif
-endif
-
-
ALARM_CPP_TEST_SRC = \
test/cpp/common/alarm_cpp_test.cc \
@@ -15099,6 +14815,34 @@ src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c: $(OPENSSL_DE
src/core/ext/transport/cronet/client/secure/cronet_channel_create.c: $(OPENSSL_DEP)
src/core/ext/transport/cronet/transport/cronet_api_dummy.c: $(OPENSSL_DEP)
src/core/ext/transport/cronet/transport/cronet_transport.c: $(OPENSSL_DEP)
+src/core/lib/http/httpcli_security_connector.c: $(OPENSSL_DEP)
+src/core/lib/security/context/security_context.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/composite/composite_credentials.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/credentials.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/credentials_metadata.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/fake/fake_credentials.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/google_default/credentials_posix.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/google_default/credentials_windows.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/google_default/google_default_credentials.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/iam/iam_credentials.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/jwt/json_token.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/jwt/jwt_credentials.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/jwt/jwt_verifier.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/oauth2/oauth2_credentials.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/plugin/plugin_credentials.c: $(OPENSSL_DEP)
+src/core/lib/security/credentials/ssl/ssl_credentials.c: $(OPENSSL_DEP)
+src/core/lib/security/transport/client_auth_filter.c: $(OPENSSL_DEP)
+src/core/lib/security/transport/handshake.c: $(OPENSSL_DEP)
+src/core/lib/security/transport/secure_endpoint.c: $(OPENSSL_DEP)
+src/core/lib/security/transport/security_connector.c: $(OPENSSL_DEP)
+src/core/lib/security/transport/server_auth_filter.c: $(OPENSSL_DEP)
+src/core/lib/security/transport/tsi_error.c: $(OPENSSL_DEP)
+src/core/lib/security/util/b64.c: $(OPENSSL_DEP)
+src/core/lib/security/util/json_util.c: $(OPENSSL_DEP)
+src/core/lib/surface/init_secure.c: $(OPENSSL_DEP)
+src/core/lib/tsi/fake_transport_security.c: $(OPENSSL_DEP)
+src/core/lib/tsi/ssl_transport_security.c: $(OPENSSL_DEP)
+src/core/lib/tsi/transport_security.c: $(OPENSSL_DEP)
src/core/plugin_registry/grpc_cronet_plugin_registry.c: $(OPENSSL_DEP)
src/core/plugin_registry/grpc_plugin_registry.c: $(OPENSSL_DEP)
src/cpp/client/secure_credentials.cc: $(OPENSSL_DEP)
diff --git a/build.yaml b/build.yaml
index dc42a61300..57545839d4 100644
--- a/build.yaml
+++ b/build.yaml
@@ -7,7 +7,7 @@ settings:
'#3': Use "-preN" suffixes to identify pre-release versions
'#4': Per-language overrides are possible with (eg) ruby_version tag here
'#5': See the expand_version.py for all the quirks here
- version: 0.16.0-dev
+ version: 1.1.0-dev
filegroups:
- name: census
public_headers:
@@ -712,10 +712,10 @@ filegroups:
- src/cpp/util/status.cc
- src/cpp/util/string_ref.cc
- src/cpp/util/time.cc
+ deps:
+ - grpc
uses:
- grpc++_codegen_base
- - grpc_base
- - grpc_secure
- name: grpc++_codegen_base
language: c++
public_headers:
@@ -2430,20 +2430,6 @@ targets:
- grpc
- gpr_test_util
- gpr
-- name: workqueue_test
- build: test
- language: c
- src:
- - test/core/iomgr/workqueue_test.c
- deps:
- - grpc_test_util
- - grpc
- - gpr_test_util
- - gpr
- platforms:
- - mac
- - linux
- - posix
- name: alarm_cpp_test
gtest: true
build: test
diff --git a/composer.json b/composer.json
index 6e7f24b451..0ebe0a1108 100644
--- a/composer.json
+++ b/composer.json
@@ -5,15 +5,9 @@
"keywords": ["rpc"],
"homepage": "http://grpc.io",
"license": "BSD-3-Clause",
- "repositories": [
- {
- "type": "vcs",
- "url": "https://github.com/stanley-cheung/Protobuf-PHP"
- }
- ],
"require": {
"php": ">=5.5.0",
- "datto/protobuf-php": "dev-master"
+ "stanley-cheung/protobuf-php": "dev-master"
},
"require-dev": {
"google/auth": "v0.9"
diff --git a/doc/c-style-guide.md b/doc/c-style-guide.md
index d6f9bbd7d4..369bd56a46 100644
--- a/doc/c-style-guide.md
+++ b/doc/c-style-guide.md
@@ -9,16 +9,17 @@ Here we document style rules for C usage in the gRPC Core library.
General
-------
-- Layout rules are defined by clang-format, and all code should be passed through
- clang-format. A (docker-based) script to do so is included in
- [tools/distrib/clang\_format\_code.sh] (../tools/distrib/clang_format_code.sh).
+- Layout rules are defined by clang-format, and all code should be passed
+ through clang-format. A (docker-based) script to do so is included in
+ [tools/distrib/clang\_format\_code.sh](../tools/distrib/clang_format_code.sh).
Header Files
------------
-- Public header files (those in the include/grpc tree) should compile as pedantic C89
-- Public header files should be includable from C++ programs. That is, they should
- include the following:
+- Public header files (those in the include/grpc tree) should compile as
+ pedantic C89.
+- Public header files should be includable from C++ programs. That is, they
+ should include the following:
```c
#ifdef __cplusplus
extern "C" {
@@ -34,24 +35,34 @@ Header Files
- All header files should have a #define guard to prevent multiple inclusion.
To guarantee uniqueness they should be based on the file's path.
- For public headers: include/grpc/grpc.h --> GRPC_GRPC_H
+ For public headers: `include/grpc/grpc.h` → `GRPC_GRPC_H`
+
+ For private headers:
+ `src/core/channel/channel_stack.h` →
+ `GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_STACK_H`
+
+Variable Initialization
+-----------------------
+
+When declaring a (non-static) pointer variable, always initialize it to `NULL`.
+Even in the case of static pointer variables, it's recommended to explicitly
+initialize them to `NULL`.
- For private headers:
- src/core/channel/channel_stack.h --> GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_STACK_H
C99 Features
------------
-- Variable sized arrays are not allowed
-- Do not use the 'inline' keyword
-- Flexible array members are allowed (https://en.wikipedia.org/wiki/Flexible_array_member)
+- Variable sized arrays are not allowed.
+- Do not use the 'inline' keyword.
+- Flexible array members are allowed
+ (https://en.wikipedia.org/wiki/Flexible_array_member).
Comments
--------
Within public header files, only `/* */` comments are allowed.
-Within implementation files and private headers, either single line `//`
+Within implementation files and private headers, either single line `//`
or multi line `/* */` comments are allowed. Only one comment style per file is
allowed however (i.e. if single line comments are used anywhere within a file,
ALL comments within that file must be single line comments).
@@ -59,7 +70,15 @@ ALL comments within that file must be single line comments).
Symbol Names
------------
-- Non-static functions must be prefixed by grpc_
-- static functions must not be prefixed by grpc_
-- enumeration values and #define names are uppercased, all others are lowercased
-- Multiple word identifiers use underscore as a delimiter (NEVER camel casing)
+- Non-static functions must be prefixed by `grpc_`
+- Static functions must *not* be prefixed by `grpc_`
+- Enumeration values and `#define` names must be uppercase. All other
+  identifiers must be lowercase.
+- Multiple word identifiers use underscore as a delimiter, *never* camel
+ case. E.g. `variable_name`.
+
+Functions
+----------
+
+- The use of [`atexit()`](http://man7.org/linux/man-pages/man3/atexit.3.html) is
+  forbidden in libgrpc.
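Taken together, the rules above describe the shape of a public gRPC Core header. The following is a minimal illustrative sketch, not part of this change (the path `include/grpc/widget.h` and the `grpc_widget_*` names are hypothetical): a path-derived include guard, an `extern "C"` wrapper so C++ programs can include it, uppercase `#define` names, and `grpc_`-prefixed non-static functions.

```c
/* Hypothetical public header include/grpc/widget.h, illustrating the style
   rules above. Valid pedantic C89 and includable from C++. */
#ifndef GRPC_WIDGET_H /* guard derived from the file path */
#define GRPC_WIDGET_H

#ifdef __cplusplus
extern "C" {
#endif

/* #define names are uppercase, with underscores between words. */
#define GRPC_WIDGET_MAX_NAME_LENGTH 128

/* Non-static functions are prefixed with grpc_; identifiers never use
   camel case. Returns nonzero if the name fits the length limit. */
int grpc_widget_name_is_valid(const char *name);

#ifdef __cplusplus
}
#endif

#endif /* GRPC_WIDGET_H */
```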
diff --git a/doc/statuscodes.md b/doc/statuscodes.md
index c918f9ed9a..1cd72df30a 100644
--- a/doc/statuscodes.md
+++ b/doc/statuscodes.md
@@ -18,6 +18,7 @@ Only a subset of the pre-defined status codes are generated by the gRPC librarie
| Could not decompress, but compression algorithm supported (Server -> Client) | INTERNAL | Client |
| Compression mechanism used by client not supported at server | UNIMPLEMENTED | Server |
| Server temporarily out of resources (e.g., Flow-control resource limits reached) | RESOURCE_EXHAUSTED | Server|
+| Client does not have enough memory to hold the server response | RESOURCE_EXHAUSTED | Client |
| Flow-control protocol violation | INTERNAL | Both |
| Error parsing returned status | UNKNOWN | Client |
| Incorrect Auth metadata ( Credentials failed to get metadata, Incompatible credentials set on channel and call, Invalid host set in `:authority` metadata, etc.) | UNAUTHENTICATED | Both |
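The added row documents a client-generated RESOURCE_EXHAUSTED: the client itself reports it when it cannot buffer the server's response. As a rough illustration of how application code consumes these codes, the helper below is a sketch only; the function name and retry policy are invented, and only the public enum from `<grpc/status.h>` is assumed.

```c
/* Illustrative only: classify a received status code using the public enum
   from <grpc/status.h>. The helper name and policy are invented. */
#include <grpc/status.h>

static int status_is_retryable(grpc_status_code code) {
  switch (code) {
    case GRPC_STATUS_RESOURCE_EXHAUSTED: /* server, or now client, out of resources */
    case GRPC_STATUS_UNAVAILABLE:        /* transient transport failure */
      return 1;
    default:
      return 0;
  }
}
```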
diff --git a/examples/csharp/helloworld/README.md b/examples/csharp/helloworld/README.md
index 63131ed98c..d13c9ac9db 100644
--- a/examples/csharp/helloworld/README.md
+++ b/examples/csharp/helloworld/README.md
@@ -5,23 +5,16 @@ BACKGROUND
-------------
For this sample, we've already generated the server and client stubs from [helloworld.proto][].
-Example projects depend on the [Grpc](https://www.nuget.org/packages/Grpc/)
+Example projects depend on the [Grpc](https://www.nuget.org/packages/Grpc/), [Grpc.Tools](https://www.nuget.org/packages/Grpc.Tools/)
and [Google.Protobuf](https://www.nuget.org/packages/Google.Protobuf/) NuGet packages
which have been already added to the project for you.
PREREQUISITES
-------------
-**Windows**
-- .NET 4.5+
-- Visual Studio 2013 or 2015
-**Linux**
-- Mono 4.0+
-- Monodevelop 5.9+ (with NuGet plugin installed)
-
-**Mac OS X**
-- Xamarin Studio 5.9+
-- [homebrew][]
+- Windows: .NET Framework 4.5+, Visual Studio 2013 or 2015
+- Linux: Mono 4+, MonoDevelop 5.9+ (with NuGet add-in installed)
+- Mac OS X: Xamarin Studio 5.9+
BUILD
-------
@@ -56,6 +49,5 @@ Tutorial
You can find a more detailed tutorial in [gRPC Basics: C#][]
-[homebrew]:http://brew.sh
[helloworld.proto]:../../protos/helloworld.proto
[gRPC Basics: C#]:http://www.grpc.io/docs/tutorials/basic/csharp.html
diff --git a/examples/objective-c/auth_sample/AuthTestService.podspec b/examples/objective-c/auth_sample/AuthTestService.podspec
index d246653ea7..af5ef28946 100644
--- a/examples/objective-c/auth_sample/AuthTestService.podspec
+++ b/examples/objective-c/auth_sample/AuthTestService.podspec
@@ -13,27 +13,51 @@ Pod::Spec.new do |s|
# Base directory where the .proto files are.
src = "../../protos"
+ # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
+ s.dependency "!ProtoCompiler-gRPCPlugin", "~> 0.14"
+
+ # Pods directory corresponding to this app's Podfile, relative to the location of this podspec.
+ pods_root = 'Pods'
+
+ # Path where Cocoapods downloads protoc and the gRPC plugin.
+ protoc_dir = "#{pods_root}/!ProtoCompiler"
+ protoc = "#{protoc_dir}/protoc"
+ plugin = "#{pods_root}/!ProtoCompiler-gRPCPlugin/grpc_objective_c_plugin"
+
# Directory where the generated files will be placed.
- dir = "Pods/" + s.name
+ dir = "#{pods_root}/#{s.name}"
- # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
s.prepare_command = <<-CMD
mkdir -p #{dir}
- protoc -I #{src} --objc_out=#{dir} --objcgrpc_out=#{dir} #{src}/auth_sample.proto
+ #{protoc} \
+ --plugin=protoc-gen-grpc=#{plugin} \
+ --objc_out=#{dir} \
+ --grpc_out=#{dir} \
+ -I #{src} \
+ -I #{protoc_dir} \
+ #{src}/auth_sample.proto
CMD
+ # Files generated by protoc
s.subspec "Messages" do |ms|
ms.source_files = "#{dir}/*.pbobjc.{h,m}", "#{dir}/**/*.pbobjc.{h,m}"
ms.header_mappings_dir = dir
ms.requires_arc = false
- ms.dependency "Protobuf", "~> 3.0.0-alpha-4"
+ # The generated files depend on the protobuf runtime.
+ ms.dependency "Protobuf"
+ # This is needed by all pods that depend on Protobuf:
+ ms.pod_target_xcconfig = {
+ 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
+ }
end
+ # Files generated by the gRPC plugin
s.subspec "Services" do |ss|
ss.source_files = "#{dir}/*.pbrpc.{h,m}", "#{dir}/**/*.pbrpc.{h,m}"
ss.header_mappings_dir = dir
ss.requires_arc = true
- ss.dependency "gRPC", "~> 0.12"
+ # The generated files depend on the gRPC runtime, and on the files generated by protoc.
+ ss.dependency "gRPC-ProtoRPC"
ss.dependency "#{s.name}/Messages"
end
end
diff --git a/examples/objective-c/auth_sample/Podfile b/examples/objective-c/auth_sample/Podfile
index 32157a9dce..a25d20f477 100644
--- a/examples/objective-c/auth_sample/Podfile
+++ b/examples/objective-c/auth_sample/Podfile
@@ -3,44 +3,10 @@ platform :ios, '8.0'
install! 'cocoapods', :deterministic_uuids => false
-# Location of gRPC's repo root relative to this file.
-GRPC_LOCAL_SRC = '../../..'
-
target 'AuthSample' do
# Depend on the generated AuthTestService library.
pod 'AuthTestService', :path => '.'
# Depend on Google's OAuth2 library
pod 'Google/SignIn'
-
- # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following
- # lines in your application.
- pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf"
-
- pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c"
-
- pod 'gRPC', :path => GRPC_LOCAL_SRC
- pod 'gRPC-Core', :path => GRPC_LOCAL_SRC
- pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC
- pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC
-end
-
-# This pre_install hook is only needed to use the local version of gRPC-Core. You don't need it in
-# your application.
-pre_install do |installer|
- # This is the gRPC-Core podspec object, as initialized by its podspec file.
- grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec
-
- # Copied from gRPC-Core.podspec, except for the adjusted src_root:
- src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}"
- grpc_core_spec.pod_target_xcconfig = {
- 'GRPC_SRC_ROOT' => src_root,
- 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"',
- 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"',
- # If we don't set these two settings, `include/grpc/support/time.h` and
- # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the
- # build.
- 'USE_HEADERMAP' => 'NO',
- 'ALWAYS_SEARCH_USER_PATHS' => 'NO',
- }
end
diff --git a/examples/objective-c/helloworld/HelloWorld.podspec b/examples/objective-c/helloworld/HelloWorld.podspec
index 17b016b31a..bce6cd5172 100644
--- a/examples/objective-c/helloworld/HelloWorld.podspec
+++ b/examples/objective-c/helloworld/HelloWorld.podspec
@@ -13,27 +13,51 @@ Pod::Spec.new do |s|
# Base directory where the .proto files are.
src = "../../protos"
+ # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
+ s.dependency "!ProtoCompiler-gRPCPlugin", "~> 0.14"
+
+ # Pods directory corresponding to this app's Podfile, relative to the location of this podspec.
+ pods_root = 'Pods'
+
+ # Path where Cocoapods downloads protoc and the gRPC plugin.
+ protoc_dir = "#{pods_root}/!ProtoCompiler"
+ protoc = "#{protoc_dir}/protoc"
+ plugin = "#{pods_root}/!ProtoCompiler-gRPCPlugin/grpc_objective_c_plugin"
+
# Directory where the generated files will be placed.
- dir = "Pods/" + s.name
+ dir = "#{pods_root}/#{s.name}"
- # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
s.prepare_command = <<-CMD
mkdir -p #{dir}
- protoc -I #{src} --objc_out=#{dir} --objcgrpc_out=#{dir} #{src}/helloworld.proto
+ #{protoc} \
+ --plugin=protoc-gen-grpc=#{plugin} \
+ --objc_out=#{dir} \
+ --grpc_out=#{dir} \
+ -I #{src} \
+ -I #{protoc_dir} \
+ #{src}/helloworld.proto
CMD
+ # Files generated by protoc
s.subspec "Messages" do |ms|
ms.source_files = "#{dir}/*.pbobjc.{h,m}", "#{dir}/**/*.pbobjc.{h,m}"
ms.header_mappings_dir = dir
ms.requires_arc = false
- ms.dependency "Protobuf", "~> 3.0.0-alpha-4"
+ # The generated files depend on the protobuf runtime.
+ ms.dependency "Protobuf"
+ # This is needed by all pods that depend on Protobuf:
+ ms.pod_target_xcconfig = {
+ 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
+ }
end
+ # Files generated by the gRPC plugin
s.subspec "Services" do |ss|
ss.source_files = "#{dir}/*.pbrpc.{h,m}", "#{dir}/**/*.pbrpc.{h,m}"
ss.header_mappings_dir = dir
ss.requires_arc = true
- ss.dependency "gRPC", "~> 0.12"
+ # The generated files depend on the gRPC runtime, and on the files generated by protoc.
+ ss.dependency "gRPC-ProtoRPC"
ss.dependency "#{s.name}/Messages"
end
end
diff --git a/examples/objective-c/helloworld/Podfile b/examples/objective-c/helloworld/Podfile
index e1bb4ddfd5..0c3feaa47e 100644
--- a/examples/objective-c/helloworld/Podfile
+++ b/examples/objective-c/helloworld/Podfile
@@ -3,41 +3,7 @@ platform :ios, '8.0'
install! 'cocoapods', :deterministic_uuids => false
-# Location of gRPC's repo root relative to this file.
-GRPC_LOCAL_SRC = '../../..'
-
target 'HelloWorld' do
# Depend on the generated HelloWorld library.
pod 'HelloWorld', :path => '.'
-
- # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following
- # lines in your application.
- pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf"
-
- pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c"
-
- pod 'gRPC', :path => GRPC_LOCAL_SRC
- pod 'gRPC-Core', :path => GRPC_LOCAL_SRC
- pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC
- pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC
-end
-
-# This pre_install hook is only needed to use the local version of gRPC-Core. You don't need it in
-# your application.
-pre_install do |installer|
- # This is the gRPC-Core podspec object, as initialized by its podspec file.
- grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec
-
- # Copied from gRPC-Core.podspec, except for the adjusted src_root:
- src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}"
- grpc_core_spec.pod_target_xcconfig = {
- 'GRPC_SRC_ROOT' => src_root,
- 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"',
- 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"',
- # If we don't set these two settings, `include/grpc/support/time.h` and
- # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the
- # build.
- 'USE_HEADERMAP' => 'NO',
- 'ALWAYS_SEARCH_USER_PATHS' => 'NO',
- }
end
diff --git a/examples/objective-c/route_guide/Podfile b/examples/objective-c/route_guide/Podfile
index 943f5464d8..b77eb1b11d 100644
--- a/examples/objective-c/route_guide/Podfile
+++ b/examples/objective-c/route_guide/Podfile
@@ -3,41 +3,7 @@ platform :ios, '8.0'
install! 'cocoapods', :deterministic_uuids => false
-# Location of gRPC's repo root relative to this file.
-GRPC_LOCAL_SRC = '../../..'
-
target 'RouteGuideClient' do
# Depend on the generated RouteGuide library.
pod 'RouteGuide', :path => '.'
-
- # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following
- # lines in your application.
- pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf"
-
- pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c"
-
- pod 'gRPC', :path => GRPC_LOCAL_SRC
- pod 'gRPC-Core', :path => GRPC_LOCAL_SRC
- pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC
- pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC
-end
-
-# This pre_install hook is only needed to use the local version of gRPC-Core. You don't need it in
-# your application.
-pre_install do |installer|
- # This is the gRPC-Core podspec object, as initialized by its podspec file.
- grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec
-
- # Copied from gRPC-Core.podspec, except for the adjusted src_root:
- src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}"
- grpc_core_spec.pod_target_xcconfig = {
- 'GRPC_SRC_ROOT' => src_root,
- 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"',
- 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"',
- # If we don't set these two settings, `include/grpc/support/time.h` and
- # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the
- # build.
- 'USE_HEADERMAP' => 'NO',
- 'ALWAYS_SEARCH_USER_PATHS' => 'NO',
- }
end
diff --git a/examples/objective-c/route_guide/RouteGuide.podspec b/examples/objective-c/route_guide/RouteGuide.podspec
index 97a61ff51a..e213250751 100644
--- a/examples/objective-c/route_guide/RouteGuide.podspec
+++ b/examples/objective-c/route_guide/RouteGuide.podspec
@@ -13,27 +13,51 @@ Pod::Spec.new do |s|
# Base directory where the .proto files are.
src = "../../protos"
+ # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
+ s.dependency "!ProtoCompiler-gRPCPlugin", "~> 0.14"
+
+ # Pods directory corresponding to this app's Podfile, relative to the location of this podspec.
+ pods_root = 'Pods'
+
+ # Path where Cocoapods downloads protoc and the gRPC plugin.
+ protoc_dir = "#{pods_root}/!ProtoCompiler"
+ protoc = "#{protoc_dir}/protoc"
+ plugin = "#{pods_root}/!ProtoCompiler-gRPCPlugin/grpc_objective_c_plugin"
+
# Directory where the generated files will be placed.
- dir = "Pods/" + s.name
+ dir = "#{pods_root}/#{s.name}"
- # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
s.prepare_command = <<-CMD
mkdir -p #{dir}
- protoc -I #{src} --objc_out=#{dir} --objcgrpc_out=#{dir} #{src}/route_guide.proto
+ #{protoc} \
+ --plugin=protoc-gen-grpc=#{plugin} \
+ --objc_out=#{dir} \
+ --grpc_out=#{dir} \
+ -I #{src} \
+ -I #{protoc_dir} \
+ #{src}/route_guide.proto
CMD
+ # Files generated by protoc
s.subspec "Messages" do |ms|
ms.source_files = "#{dir}/*.pbobjc.{h,m}", "#{dir}/**/*.pbobjc.{h,m}"
ms.header_mappings_dir = dir
ms.requires_arc = false
- ms.dependency "Protobuf", "~> 3.0.0-alpha-4"
+ # The generated files depend on the protobuf runtime.
+ ms.dependency "Protobuf"
+ # This is needed by all pods that depend on Protobuf:
+ ms.pod_target_xcconfig = {
+ 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
+ }
end
+ # Files generated by the gRPC plugin
s.subspec "Services" do |ss|
ss.source_files = "#{dir}/*.pbrpc.{h,m}", "#{dir}/**/*.pbrpc.{h,m}"
ss.header_mappings_dir = dir
ss.requires_arc = true
- ss.dependency "gRPC", "~> 0.12"
+ # The generated files depend on the gRPC runtime, and on the files generated by protoc.
+ ss.dependency "gRPC-ProtoRPC"
ss.dependency "#{s.name}/Messages"
end
end
diff --git a/examples/php/README.md b/examples/php/README.md
index 6889a6cb7e..54cc97d8c2 100644
--- a/examples/php/README.md
+++ b/examples/php/README.md
@@ -11,7 +11,7 @@ INSTALL
- Install the gRPC PHP extension
```sh
- $ [sudo] pecl install grpc-beta
+ $ [sudo] pecl install grpc
```
- Clone this repository
diff --git a/examples/php/composer.json b/examples/php/composer.json
index 950e11367d..a8b790b1de 100644
--- a/examples/php/composer.json
+++ b/examples/php/composer.json
@@ -2,12 +2,6 @@
"name": "grpc/grpc-demo",
"description": "gRPC example for PHP",
"minimum-stability": "dev",
- "repositories": [
- {
- "type": "vcs",
- "url": "https://github.com/stanley-cheung/Protobuf-PHP"
- }
- ],
"require": {
"grpc/grpc": "v0.15.0"
}
diff --git a/include/grpc++/server.h b/include/grpc++/server.h
index 7a8858ef19..6876961e21 100644
--- a/include/grpc++/server.h
+++ b/include/grpc++/server.h
@@ -179,10 +179,13 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen {
grpc::mutex mu_;
bool started_;
bool shutdown_;
+ bool shutdown_notified_;
// The number of threads which are running callbacks.
int num_running_cb_;
grpc::condition_variable callback_cv_;
+ grpc::condition_variable shutdown_cv_;
+
std::shared_ptr<GlobalCallbacks> global_callbacks_;
std::list<SyncRequest>* sync_methods_;
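The new `shutdown_notified_` flag and `shutdown_cv_` condition variable are the usual predicate-plus-condition-variable pair that lets callers block until shutdown has actually completed. The C sketch below shows the general pattern with gRPC's gpr primitives from `<grpc/support/sync.h>`; the struct and function names are illustrative and are not the real Server internals.

```c
/* Sketch of the flag + condition-variable shutdown pattern, using gpr
   primitives. Not the actual Server implementation. */
#include <grpc/support/sync.h>
#include <grpc/support/time.h>

typedef struct {
  gpr_mu mu;
  gpr_cv shutdown_cv;
  int shutdown_notified;
} shutdown_state;

static void shutdown_state_init(shutdown_state *s) {
  gpr_mu_init(&s->mu);
  gpr_cv_init(&s->shutdown_cv);
  s->shutdown_notified = 0;
}

/* Called once shutdown has finished. */
static void notify_shutdown(shutdown_state *s) {
  gpr_mu_lock(&s->mu);
  s->shutdown_notified = 1;
  gpr_cv_broadcast(&s->shutdown_cv);
  gpr_mu_unlock(&s->mu);
}

/* Blocks until notify_shutdown has run; the predicate guards against
   spurious wakeups. */
static void wait_for_shutdown(shutdown_state *s) {
  gpr_mu_lock(&s->mu);
  while (!s->shutdown_notified) {
    gpr_cv_wait(&s->shutdown_cv, &s->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&s->mu);
}
```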
diff --git a/include/grpc++/support/slice.h b/include/grpc++/support/slice.h
index cec9062d4f..5874b4f5ae 100644
--- a/include/grpc++/support/slice.h
+++ b/include/grpc++/support/slice.h
@@ -77,6 +77,9 @@ class Slice GRPC_FINAL {
/// Raw pointer to the end (one byte \em past the last element) of the slice.
const uint8_t* end() const { return GPR_SLICE_END_PTR(slice_); }
+ /// Raw C slice. Caller needs to call gpr_slice_unref when done.
+ gpr_slice c_slice() const { return gpr_slice_ref(slice_); }
+
private:
friend class ByteBuffer;
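The new `c_slice()` accessor hands back a referenced `gpr_slice`, so every call must be balanced by a `gpr_slice_unref`. The sketch below is not from this patch; it illustrates that ownership contract directly on the underlying C slice API.

```c
/* Sketch of the ref/unref contract behind Slice::c_slice(), using the raw
   gpr_slice API from <grpc/support/slice.h>. */
#include <grpc/support/slice.h>
#include <stdio.h>

static void print_slice_length(gpr_slice s) {
  /* Take our own reference while we use the slice... */
  gpr_slice ref = gpr_slice_ref(s);
  printf("slice holds %lu bytes\n", (unsigned long)GPR_SLICE_LENGTH(ref));
  /* ...and release it when done, exactly once per reference taken. */
  gpr_slice_unref(ref);
}

int main(void) {
  gpr_slice s = gpr_slice_from_copied_string("hello");
  print_slice_length(s);
  gpr_slice_unref(s); /* balances the reference created above */
  return 0;
}
```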
diff --git a/package.json b/package.json
index 1fec9cb40f..0e229c9842 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "grpc",
- "version": "0.16.0-dev",
+ "version": "1.1.0-dev",
"author": "Google Inc.",
"description": "gRPC Library for Node",
"homepage": "http://www.grpc.io/",
diff --git a/package.xml b/package.xml
index 49a6d900f1..b5d5f4602a 100644
--- a/package.xml
+++ b/package.xml
@@ -10,18 +10,19 @@
<email>grpc-packages@google.com</email>
<active>yes</active>
</lead>
- <date>2016-06-30</date>
+ <date>2016-07-13</date>
<time>16:06:07</time>
<version>
- <release>0.16.0</release>
- <api>0.16.0</api>
+ <release>1.1.0</release>
+ <api>1.1.0</api>
</version>
<stability>
- <release>beta</release>
- <api>beta</api>
+ <release>stable</release>
+ <api>stable</api>
</stability>
<license>BSD</license>
<notes>
+- GA release
- Fix shutdown hang problem #4017
</notes>
<contents>
@@ -1086,16 +1087,17 @@ Update to wrap gRPC C Core version 0.10.0
</release>
<release>
<version>
- <release>0.15.1</release>
- <api>0.15.1</api>
+ <release>1.0.0</release>
+ <api>1.0.0</api>
</version>
<stability>
- <release>beta</release>
- <api>beta</api>
+ <release>stable</release>
+ <api>stable</api>
</stability>
- <date>2016-06-30</date>
+ <date>2016-07-13</date>
<license>BSD</license>
<notes>
+- GA release
- Fix shutdown hang problem #4017
</notes>
</release>
diff --git a/setup.py b/setup.py
index 700515b894..6dbc169053 100644
--- a/setup.py
+++ b/setup.py
@@ -62,15 +62,17 @@ import commands
import grpc_core_dependencies
import grpc_version
-# TODO(atash) make this conditional on being on a mingw32 build
-_unixccompiler_patch.monkeypatch_unix_compiler()
+if 'win32' in sys.platform:
+ _unixccompiler_patch.monkeypatch_unix_compiler()
LICENSE = '3-clause BSD'
# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
-# to have been generated by building first *with* Cython support.
+# to have been generated by building first *with* Cython support. Even if this
+# is set to false, if the script detects that the generated `.c` file isn't
+# present, then it will still attempt to use Cython.
BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)
# Environment variable to determine whether or not to enable coverage analysis
@@ -82,9 +84,40 @@ ENABLE_CYTHON_TRACING = os.environ.get(
# entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support.
# We use these environment variables to thus get around that without locking
# ourselves in w.r.t. the multitude of operating systems this ought to build on.
-# By default we assume a GCC-like compiler.
-EXTRA_COMPILE_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_CFLAGS', ''))
-EXTRA_LINK_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_LDFLAGS', ''))
+# We can also use these variables as a way to inject environment-specific
+# compiler/linker flags. We assume GCC-like compilers and/or MinGW as a
+# reasonable default.
+EXTRA_ENV_COMPILE_ARGS = os.environ.get('GRPC_PYTHON_CFLAGS', None)
+EXTRA_ENV_LINK_ARGS = os.environ.get('GRPC_PYTHON_LDFLAGS', None)
+if EXTRA_ENV_COMPILE_ARGS is None:
+ EXTRA_ENV_COMPILE_ARGS = '-fno-wrapv'
+ if 'win32' in sys.platform:
+ # We use define flags here and don't directly add to DEFINE_MACROS below to
+ # ensure that the expert user/builder has a way of turning it off (via the
+ # envvars) without adding yet more GRPC-specific envvars.
+ # See https://sourceforge.net/p/mingw-w64/bugs/363/
+ if '32' in platform.architecture()[0]:
+ EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s'
+ else:
+ EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64'
+ elif "linux" in sys.platform or "darwin" in sys.platform:
+ EXTRA_ENV_COMPILE_ARGS += ' -fvisibility=hidden'
+if EXTRA_ENV_LINK_ARGS is None:
+ EXTRA_ENV_LINK_ARGS = '-lpthread'
+ if 'win32' in sys.platform:
+ # TODO(atash) check if this is actually safe to just import and call on
+ # non-Windows (to avoid breaking import style)
+ from distutils.cygwinccompiler import get_msvcr
+ msvcr = get_msvcr()[0]
+ # TODO(atash) sift through the GCC specs to see if libstdc++ can have any
+ # influence on the linkage outcome on MinGW for non-C++ programs.
+ EXTRA_ENV_LINK_ARGS += (
+ ' -static-libgcc -static-libstdc++ -mcrtdll={msvcr} '
+ '-static'.format(msvcr=msvcr))
+ elif "linux" in sys.platform:
+ EXTRA_ENV_LINK_ARGS += ' -Wl,-wrap,memcpy'
+EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS)
+EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS)
CYTHON_EXTENSION_PACKAGE_NAMES = ()
@@ -116,13 +149,8 @@ if "win32" in sys.platform:
LDFLAGS = tuple(EXTRA_LINK_ARGS)
CFLAGS = tuple(EXTRA_COMPILE_ARGS)
-if "linux" in sys.platform:
- LDFLAGS += ('-Wl,-wrap,memcpy',)
if "linux" in sys.platform or "darwin" in sys.platform:
- CFLAGS += ('-fvisibility=hidden',)
-
pymodinit_type = 'PyObject*' if PY3 else 'void'
-
pymodinit = '__attribute__((visibility ("default"))) {}'.format(pymodinit_type)
DEFINE_MACROS += (('PyMODINIT_FUNC', pymodinit),)
@@ -137,8 +165,13 @@ if 'darwin' in sys.platform and PY3:
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.7'
-def cython_extensions(module_names, extra_sources, include_dirs,
- libraries, define_macros, build_with_cython=False):
+def cython_extensions():
+ module_names = list(CYTHON_EXTENSION_MODULE_NAMES)
+ extra_sources = list(CYTHON_HELPER_C_FILES) + list(CORE_C_FILES)
+ include_dirs = list(EXTENSION_INCLUDE_DIRECTORIES)
+ libraries = list(EXTENSION_LIBRARIES)
+ define_macros = list(DEFINE_MACROS)
+ build_with_cython = bool(BUILD_WITH_CYTHON)
# Set compiler directives linetrace argument only if we care about tracing;
# this is due to Cython having different behavior between linetrace being
# False and linetrace being unset. See issue #5689.
@@ -146,10 +179,20 @@ def cython_extensions(module_names, extra_sources, include_dirs,
if ENABLE_CYTHON_TRACING:
define_macros = define_macros + [('CYTHON_TRACE_NOGIL', 1)]
cython_compiler_directives['linetrace'] = True
- file_extension = 'pyx' if build_with_cython else 'c'
- module_files = [os.path.join(PYTHON_STEM,
- name.replace('.', '/') + '.' + file_extension)
- for name in module_names]
+ pyx_module_files = [os.path.join(PYTHON_STEM,
+ name.replace('.', '/') + '.pyx')
+ for name in module_names]
+ c_module_files = [os.path.join(PYTHON_STEM,
+ name.replace('.', '/') + '.c')
+ for name in module_names]
+ if not build_with_cython:
+ for module_file in c_module_files:
+ if not os.path.isfile(module_file):
+ sys.stderr.write('Cython-generated files are missing; '
+ 'forcing Cython build...\n')
+ build_with_cython = True
+ break
+ module_files = pyx_module_files if build_with_cython else c_module_files
extensions = [
_extension.Extension(
name=module_name,
@@ -169,11 +212,7 @@ def cython_extensions(module_names, extra_sources, include_dirs,
else:
return extensions
-CYTHON_EXTENSION_MODULES = cython_extensions(
- list(CYTHON_EXTENSION_MODULE_NAMES),
- list(CYTHON_HELPER_C_FILES) + list(CORE_C_FILES),
- list(EXTENSION_INCLUDE_DIRECTORIES), list(EXTENSION_LIBRARIES),
- list(DEFINE_MACROS), bool(BUILD_WITH_CYTHON))
+CYTHON_EXTENSION_MODULES = cython_extensions()
PACKAGE_DIRECTORIES = {
'': PYTHON_STEM,
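
Editor's note: the new cython_extensions() shown above chooses between the pre-generated .c sources and the .pyx sources at build time. Below is a minimal Python sketch of that fallback, assuming a comparable module layout; the function and variable names are illustrative, not the exact setup.py symbols.

    import os
    import sys

    def pick_module_files(python_stem, module_names, build_with_cython):
        # Prefer the pre-generated C sources, but force a Cython build if any
        # generated file is missing, mirroring the check added above.
        pyx_files = [os.path.join(python_stem, name.replace('.', '/') + '.pyx')
                     for name in module_names]
        c_files = [os.path.join(python_stem, name.replace('.', '/') + '.c')
                   for name in module_names]
        if not build_with_cython and not all(os.path.isfile(f) for f in c_files):
            sys.stderr.write('Cython-generated files are missing; '
                             'forcing Cython build...\n')
            build_with_cython = True
        return pyx_files if build_with_cython else c_files
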
diff --git a/src/compiler/config.h b/src/compiler/config.h
index 1cbd842f0a..ba44cd8a31 100644
--- a/src/compiler/config.h
+++ b/src/compiler/config.h
@@ -60,7 +60,8 @@
#ifndef GRPC_CUSTOM_PARSEGENERATORPARAMETER
#include <google/protobuf/compiler/code_generator.h>
-#define GRPC_CUSTOM_PARSEGENERATORPARAMETER ::google::protobuf::compiler::ParseGeneratorParameter
+#define GRPC_CUSTOM_PARSEGENERATORPARAMETER \
+ ::google::protobuf::compiler::ParseGeneratorParameter
#endif
#ifndef GRPC_CUSTOM_STRING
@@ -81,8 +82,8 @@ static inline int PluginMain(int argc, char* argv[],
const CodeGenerator* generator) {
return GRPC_CUSTOM_PLUGINMAIN(argc, argv, generator);
}
-static inline void ParseGeneratorParameter(const string& parameter,
- std::vector<std::pair<string, string> >* options) {
+static inline void ParseGeneratorParameter(
+ const string& parameter, std::vector<std::pair<string, string> >* options) {
GRPC_CUSTOM_PARSEGENERATORPARAMETER(parameter, options);
}
diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc
index 2288ba4163..c386115ec2 100644
--- a/src/compiler/cpp_generator.cc
+++ b/src/compiler/cpp_generator.cc
@@ -64,19 +64,22 @@ grpc::string FilenameIdentifier(const grpc::string &filename) {
}
} // namespace
-template<class T, size_t N>
-T *array_end(T (&array)[N]) { return array + N; }
+template <class T, size_t N>
+T *array_end(T (&array)[N]) {
+ return array + N;
+}
-void PrintIncludes(Printer *printer, const std::vector<grpc::string>& headers, const Parameters &params) {
+void PrintIncludes(Printer *printer, const std::vector<grpc::string> &headers,
+ const Parameters &params) {
std::map<grpc::string, grpc::string> vars;
vars["l"] = params.use_system_headers ? '<' : '"';
vars["r"] = params.use_system_headers ? '>' : '"';
- auto& s = params.grpc_search_path;
+ auto &s = params.grpc_search_path;
if (!s.empty()) {
vars["l"] += s;
- if (s[s.size()-1] != '/') {
+ if (s[s.size() - 1] != '/') {
vars["l"] += '/';
}
}
@@ -101,7 +104,7 @@ grpc::string GetHeaderPrologue(File *file, const Parameters & /*params*/) {
printer->Print(vars, "// Generated by the gRPC protobuf plugin.\n");
printer->Print(vars,
- "// If you make any local change, they will be lost.\n");
+ "// If you make any local change, they will be lost.\n");
printer->Print(vars, "// source: $filename$\n");
grpc::string leading_comments = file->GetLeadingComments();
if (!leading_comments.empty()) {
@@ -117,8 +120,7 @@ grpc::string GetHeaderPrologue(File *file, const Parameters & /*params*/) {
return output;
}
-grpc::string GetHeaderIncludes(File *file,
- const Parameters &params) {
+grpc::string GetHeaderIncludes(File *file, const Parameters &params) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
@@ -126,15 +128,14 @@ grpc::string GetHeaderIncludes(File *file,
std::map<grpc::string, grpc::string> vars;
static const char *headers_strs[] = {
- "grpc++/impl/codegen/async_stream.h",
- "grpc++/impl/codegen/async_unary_call.h",
- "grpc++/impl/codegen/proto_utils.h",
- "grpc++/impl/codegen/rpc_method.h",
- "grpc++/impl/codegen/service_type.h",
- "grpc++/impl/codegen/status.h",
- "grpc++/impl/codegen/stub_options.h",
- "grpc++/impl/codegen/sync_stream.h"
- };
+ "grpc++/impl/codegen/async_stream.h",
+ "grpc++/impl/codegen/async_unary_call.h",
+ "grpc++/impl/codegen/proto_utils.h",
+ "grpc++/impl/codegen/rpc_method.h",
+ "grpc++/impl/codegen/service_type.h",
+ "grpc++/impl/codegen/status.h",
+ "grpc++/impl/codegen/stub_options.h",
+ "grpc++/impl/codegen/sync_stream.h"};
std::vector<grpc::string> headers(headers_strs, array_end(headers_strs));
PrintIncludes(printer.get(), headers, params);
printer->Print(vars, "\n");
@@ -309,8 +310,7 @@ void PrintHeaderClientMethodInterfaces(
}
}
-void PrintHeaderClientMethod(Printer *printer,
- const Method *method,
+void PrintHeaderClientMethod(Printer *printer, const Method *method,
std::map<grpc::string, grpc::string> *vars,
bool is_public) {
(*vars)["Method"] = method->name();
@@ -490,10 +490,8 @@ void PrintHeaderServerMethodSync(Printer *printer, const Method *method,
printer->Print(method->GetTrailingComments().c_str());
}
-void PrintHeaderServerMethodAsync(
- Printer *printer,
- const Method *method,
- std::map<grpc::string, grpc::string> *vars) {
+void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
+ std::map<grpc::string, grpc::string> *vars) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
@@ -607,8 +605,7 @@ void PrintHeaderServerMethodAsync(
}
void PrintHeaderServerMethodGeneric(
- Printer *printer,
- const Method *method,
+ Printer *printer, const Method *method,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
@@ -677,8 +674,7 @@ void PrintHeaderServerMethodGeneric(
printer->Print(*vars, "};\n");
}
-void PrintHeaderService(Printer *printer,
- const Service *service,
+void PrintHeaderService(Printer *printer, const Service *service,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["Service"] = service->name();
@@ -696,14 +692,16 @@ void PrintHeaderService(Printer *printer,
printer->Print("virtual ~StubInterface() {}\n");
for (int i = 0; i < service->method_count(); ++i) {
printer->Print(service->method(i)->GetLeadingComments().c_str());
- PrintHeaderClientMethodInterfaces(printer, service->method(i).get(), vars, true);
+ PrintHeaderClientMethodInterfaces(printer, service->method(i).get(), vars,
+ true);
printer->Print(service->method(i)->GetTrailingComments().c_str());
}
printer->Outdent();
printer->Print("private:\n");
printer->Indent();
for (int i = 0; i < service->method_count(); ++i) {
- PrintHeaderClientMethodInterfaces(printer, service->method(i).get(), vars, false);
+ PrintHeaderClientMethodInterfaces(printer, service->method(i).get(), vars,
+ false);
}
printer->Outdent();
printer->Print("};\n");
@@ -711,7 +709,8 @@ void PrintHeaderService(Printer *printer,
"class Stub GRPC_FINAL : public StubInterface"
" {\n public:\n");
printer->Indent();
- printer->Print("Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);\n");
+ printer->Print(
+ "Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);\n");
for (int i = 0; i < service->method_count(); ++i) {
PrintHeaderClientMethod(printer, service->method(i).get(), vars, true);
}
@@ -776,8 +775,7 @@ void PrintHeaderService(Printer *printer,
printer->Print(service->GetTrailingComments().c_str());
}
-grpc::string GetHeaderServices(File *file,
- const Parameters &params) {
+grpc::string GetHeaderServices(File *file, const Parameters &params) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
@@ -849,7 +847,7 @@ grpc::string GetSourcePrologue(File *file, const Parameters & /*params*/) {
printer->Print(vars, "// Generated by the gRPC protobuf plugin.\n");
printer->Print(vars,
- "// If you make any local change, they will be lost.\n");
+ "// If you make any local change, they will be lost.\n");
printer->Print(vars, "// source: $filename$\n\n");
printer->Print(vars, "#include \"$filename_base$$message_header_ext$\"\n");
@@ -860,8 +858,7 @@ grpc::string GetSourcePrologue(File *file, const Parameters & /*params*/) {
return output;
}
-grpc::string GetSourceIncludes(File *file,
- const Parameters &params) {
+grpc::string GetSourceIncludes(File *file, const Parameters &params) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
@@ -869,15 +866,14 @@ grpc::string GetSourceIncludes(File *file,
std::map<grpc::string, grpc::string> vars;
static const char *headers_strs[] = {
- "grpc++/impl/codegen/async_stream.h",
- "grpc++/impl/codegen/async_unary_call.h",
- "grpc++/impl/codegen/channel_interface.h",
- "grpc++/impl/codegen/client_unary_call.h",
- "grpc++/impl/codegen/method_handler_impl.h",
- "grpc++/impl/codegen/rpc_service_method.h",
- "grpc++/impl/codegen/service_type.h",
- "grpc++/impl/codegen/sync_stream.h"
- };
+ "grpc++/impl/codegen/async_stream.h",
+ "grpc++/impl/codegen/async_unary_call.h",
+ "grpc++/impl/codegen/channel_interface.h",
+ "grpc++/impl/codegen/client_unary_call.h",
+ "grpc++/impl/codegen/method_handler_impl.h",
+ "grpc++/impl/codegen/rpc_service_method.h",
+ "grpc++/impl/codegen/service_type.h",
+ "grpc++/impl/codegen/sync_stream.h"};
std::vector<grpc::string> headers(headers_strs, array_end(headers_strs));
PrintIncludes(printer.get(), headers, params);
@@ -895,8 +891,7 @@ grpc::string GetSourceIncludes(File *file,
return output;
}
-void PrintSourceClientMethod(Printer *printer,
- const Method *method,
+void PrintSourceClientMethod(Printer *printer, const Method *method,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
@@ -996,8 +991,7 @@ void PrintSourceClientMethod(Printer *printer,
}
}
-void PrintSourceServerMethod(Printer *printer,
- const Method *method,
+void PrintSourceServerMethod(Printer *printer, const Method *method,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
@@ -1055,8 +1049,7 @@ void PrintSourceServerMethod(Printer *printer,
}
}
-void PrintSourceService(Printer *printer,
- const Service *service,
+void PrintSourceService(Printer *printer, const Service *service,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["Service"] = service->name();
@@ -1168,8 +1161,7 @@ void PrintSourceService(Printer *printer,
}
}
-grpc::string GetSourceServices(File *file,
- const Parameters &params) {
+grpc::string GetSourceServices(File *file, const Parameters &params) {
grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
diff --git a/src/compiler/cpp_plugin.cc b/src/compiler/cpp_plugin.cc
index fc0296cd28..38f8f738ed 100644
--- a/src/compiler/cpp_plugin.cc
+++ b/src/compiler/cpp_plugin.cc
@@ -48,7 +48,7 @@ using grpc_cpp_generator::GetCppComments;
class ProtoBufMethod : public grpc_cpp_generator::Method {
public:
ProtoBufMethod(const grpc::protobuf::MethodDescriptor *method)
- : method_(method) {}
+ : method_(method) {}
grpc::string name() const { return method_->name(); }
@@ -90,14 +90,14 @@ class ProtoBufMethod : public grpc_cpp_generator::Method {
class ProtoBufService : public grpc_cpp_generator::Service {
public:
ProtoBufService(const grpc::protobuf::ServiceDescriptor *service)
- : service_(service) {}
+ : service_(service) {}
grpc::string name() const { return service_->name(); }
int method_count() const { return service_->method_count(); };
std::unique_ptr<const grpc_cpp_generator::Method> method(int i) const {
return std::unique_ptr<const grpc_cpp_generator::Method>(
- new ProtoBufMethod(service_->method(i)));
+ new ProtoBufMethod(service_->method(i)));
};
grpc::string GetLeadingComments() const {
@@ -115,7 +115,7 @@ class ProtoBufService : public grpc_cpp_generator::Service {
class ProtoBufPrinter : public grpc_cpp_generator::Printer {
public:
ProtoBufPrinter(grpc::string *str)
- : output_stream_(str), printer_(&output_stream_, '$') {}
+ : output_stream_(str), printer_(&output_stream_, '$') {}
void Print(const std::map<grpc::string, grpc::string> &vars,
const char *string_template) {
@@ -152,13 +152,14 @@ class ProtoBufFile : public grpc_cpp_generator::File {
int service_count() const { return file_->service_count(); };
std::unique_ptr<const grpc_cpp_generator::Service> service(int i) const {
- return std::unique_ptr<const grpc_cpp_generator::Service> (
- new ProtoBufService(file_->service(i)));
+ return std::unique_ptr<const grpc_cpp_generator::Service>(
+ new ProtoBufService(file_->service(i)));
}
- std::unique_ptr<grpc_cpp_generator::Printer> CreatePrinter(grpc::string *str) const {
+ std::unique_ptr<grpc_cpp_generator::Printer> CreatePrinter(
+ grpc::string *str) const {
return std::unique_ptr<grpc_cpp_generator::Printer>(
- new ProtoBufPrinter(str));
+ new ProtoBufPrinter(str));
}
grpc::string GetLeadingComments() const {
@@ -197,12 +198,11 @@ class CppGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
if (!parameter.empty()) {
std::vector<grpc::string> parameters_list =
- grpc_generator::tokenize(parameter, ",");
+ grpc_generator::tokenize(parameter, ",");
for (auto parameter_string = parameters_list.begin();
- parameter_string != parameters_list.end();
- parameter_string++) {
+ parameter_string != parameters_list.end(); parameter_string++) {
std::vector<grpc::string> param =
- grpc_generator::tokenize(*parameter_string, "=");
+ grpc_generator::tokenize(*parameter_string, "=");
if (param[0] == "services_namespace") {
generator_parameters.services_namespace = param[1];
} else if (param[0] == "use_system_headers") {
@@ -232,8 +232,7 @@ class CppGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
grpc_cpp_generator::GetHeaderEpilogue(&pbfile, generator_parameters);
std::unique_ptr<grpc::protobuf::io::ZeroCopyOutputStream> header_output(
context->Open(file_name + ".grpc.pb.h"));
- grpc::protobuf::io::CodedOutputStream header_coded_out(
- header_output.get());
+ grpc::protobuf::io::CodedOutputStream header_coded_out(header_output.get());
header_coded_out.WriteRaw(header_code.data(), header_code.size());
grpc::string source_code =
@@ -243,8 +242,7 @@ class CppGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
grpc_cpp_generator::GetSourceEpilogue(&pbfile, generator_parameters);
std::unique_ptr<grpc::protobuf::io::ZeroCopyOutputStream> source_output(
context->Open(file_name + ".grpc.pb.cc"));
- grpc::protobuf::io::CodedOutputStream source_coded_out(
- source_output.get());
+ grpc::protobuf::io::CodedOutputStream source_coded_out(source_output.get());
source_coded_out.WriteRaw(source_code.data(), source_code.size());
return true;
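
Editor's note: the reformatted loop in CppGrpcGenerator::Generate splits the plugin parameter string on ',' and then on '=' to collect options such as services_namespace and use_system_headers. A hedged Python paraphrase of that shape (the function name is illustrative; the real code works on tokenized std::vector values):

    def parse_generator_parameter(parameter):
        # "services_namespace=foo,use_system_headers=false"
        #   -> {'services_namespace': 'foo', 'use_system_headers': 'false'}
        options = {}
        if parameter:
            for item in parameter.split(','):
                key, _, value = item.partition('=')
                options[key] = value
        return options
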
diff --git a/src/compiler/csharp_generator.cc b/src/compiler/csharp_generator.cc
index f5a0876cf9..591e5ae3d4 100644
--- a/src/compiler/csharp_generator.cc
+++ b/src/compiler/csharp_generator.cc
@@ -36,11 +36,10 @@
#include <sstream>
#include <vector>
-#include "src/compiler/csharp_generator.h"
#include "src/compiler/config.h"
-#include "src/compiler/csharp_generator_helpers.h"
#include "src/compiler/csharp_generator.h"
-
+#include "src/compiler/csharp_generator.h"
+#include "src/compiler/csharp_generator_helpers.h"
using google::protobuf::compiler::csharp::GetFileNamespace;
using google::protobuf::compiler::csharp::GetClassName;
@@ -61,7 +60,6 @@ using grpc_generator::StringReplace;
using std::map;
using std::vector;
-
namespace grpc_csharp_generator {
namespace {
@@ -70,34 +68,43 @@ namespace {
// Currently, we cannot easily reuse the functionality as
// google/protobuf/compiler/csharp/csharp_doc_comment.h is not a public header.
// TODO(jtattermusch): reuse the functionality from google/protobuf.
-void GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer* printer, grpc::protobuf::SourceLocation location) {
- grpc::string comments = location.leading_comments.empty() ?
- location.trailing_comments : location.leading_comments;
+void GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer *printer,
+ grpc::protobuf::SourceLocation location) {
+ grpc::string comments = location.leading_comments.empty()
+ ? location.trailing_comments
+ : location.leading_comments;
if (comments.empty()) {
return;
}
- // XML escaping... no need for apostrophes etc as the whole text is going to be a child
+ // XML escaping... no need for apostrophes etc as the whole text is going to
+ // be a child
// node of a summary element, not part of an attribute.
comments = grpc_generator::StringReplace(comments, "&", "&amp;", true);
comments = grpc_generator::StringReplace(comments, "<", "&lt;", true);
std::vector<grpc::string> lines;
grpc_generator::Split(comments, '\n', &lines);
- // TODO: We really should work out which part to put in the summary and which to put in the remarks...
- // but that needs to be part of a bigger effort to understand the markdown better anyway.
+ // TODO: We really should work out which part to put in the summary and which
+ // to put in the remarks...
+ // but that needs to be part of a bigger effort to understand the markdown
+ // better anyway.
printer->Print("/// <summary>\n");
bool last_was_empty = false;
- // We squash multiple blank lines down to one, and remove any trailing blank lines. We need
- // to preserve the blank lines themselves, as this is relevant in the markdown.
- // Note that we can't remove leading or trailing whitespace as *that's* relevant in markdown too.
+ // We squash multiple blank lines down to one, and remove any trailing blank
+ // lines. We need
+ // to preserve the blank lines themselves, as this is relevant in the
+ // markdown.
+ // Note that we can't remove leading or trailing whitespace as *that's*
+ // relevant in markdown too.
// (We don't skip "just whitespace" lines, either.)
- for (std::vector<grpc::string>::iterator it = lines.begin(); it != lines.end(); ++it) {
+ for (std::vector<grpc::string>::iterator it = lines.begin();
+ it != lines.end(); ++it) {
grpc::string line = *it;
if (line.empty()) {
last_was_empty = true;
} else {
if (last_was_empty) {
- printer->Print("///\n");
+ printer->Print("///\n");
}
last_was_empty = false;
printer->Print("/// $line$\n", "line", *it);
@@ -107,23 +114,23 @@ void GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer* printer, grpc::prot
}
template <typename DescriptorType>
-void GenerateDocCommentBody(
- grpc::protobuf::io::Printer* printer, const DescriptorType* descriptor) {
+void GenerateDocCommentBody(grpc::protobuf::io::Printer *printer,
+ const DescriptorType *descriptor) {
grpc::protobuf::SourceLocation location;
if (descriptor->GetSourceLocation(&location)) {
GenerateDocCommentBodyImpl(printer, location);
}
}
-std::string GetServiceClassName(const ServiceDescriptor* service) {
+std::string GetServiceClassName(const ServiceDescriptor *service) {
return service->name();
}
-std::string GetClientClassName(const ServiceDescriptor* service) {
+std::string GetClientClassName(const ServiceDescriptor *service) {
return service->name() + "Client";
}
-std::string GetServerClassName(const ServiceDescriptor* service) {
+std::string GetServerClassName(const ServiceDescriptor *service) {
return service->name() + "Base";
}
@@ -138,13 +145,11 @@ std::string GetCSharpMethodType(MethodType method_type) {
case METHODTYPE_BIDI_STREAMING:
return "MethodType.DuplexStreaming";
}
- GOOGLE_LOG(FATAL)<< "Can't get here.";
+ GOOGLE_LOG(FATAL) << "Can't get here.";
return "";
}
-std::string GetServiceNameFieldName() {
- return "__ServiceName";
-}
+std::string GetServiceNameFieldName() { return "__ServiceName"; }
std::string GetMarshallerFieldName(const Descriptor *message) {
return "__Marshaller_" + message->name();
@@ -155,7 +160,7 @@ std::string GetMethodFieldName(const MethodDescriptor *method) {
}
std::string GetMethodRequestParamMaybe(const MethodDescriptor *method,
- bool invocation_param=false) {
+ bool invocation_param = false) {
if (method->client_streaming()) {
return "";
}
@@ -174,16 +179,16 @@ std::string GetMethodReturnTypeClient(const MethodDescriptor *method) {
case METHODTYPE_NO_STREAMING:
return "AsyncUnaryCall<" + GetClassName(method->output_type()) + ">";
case METHODTYPE_CLIENT_STREAMING:
- return "AsyncClientStreamingCall<" + GetClassName(method->input_type())
- + ", " + GetClassName(method->output_type()) + ">";
+ return "AsyncClientStreamingCall<" + GetClassName(method->input_type()) +
+ ", " + GetClassName(method->output_type()) + ">";
case METHODTYPE_SERVER_STREAMING:
- return "AsyncServerStreamingCall<" + GetClassName(method->output_type())
- + ">";
+ return "AsyncServerStreamingCall<" + GetClassName(method->output_type()) +
+ ">";
case METHODTYPE_BIDI_STREAMING:
- return "AsyncDuplexStreamingCall<" + GetClassName(method->input_type())
- + ", " + GetClassName(method->output_type()) + ">";
+ return "AsyncDuplexStreamingCall<" + GetClassName(method->input_type()) +
+ ", " + GetClassName(method->output_type()) + ">";
}
- GOOGLE_LOG(FATAL)<< "Can't get here.";
+ GOOGLE_LOG(FATAL) << "Can't get here.";
return "";
}
@@ -194,10 +199,10 @@ std::string GetMethodRequestParamServer(const MethodDescriptor *method) {
return GetClassName(method->input_type()) + " request";
case METHODTYPE_CLIENT_STREAMING:
case METHODTYPE_BIDI_STREAMING:
- return "IAsyncStreamReader<" + GetClassName(method->input_type())
- + "> requestStream";
+ return "IAsyncStreamReader<" + GetClassName(method->input_type()) +
+ "> requestStream";
}
- GOOGLE_LOG(FATAL)<< "Can't get here.";
+ GOOGLE_LOG(FATAL) << "Can't get here.";
return "";
}
@@ -205,12 +210,13 @@ std::string GetMethodReturnTypeServer(const MethodDescriptor *method) {
switch (GetMethodType(method)) {
case METHODTYPE_NO_STREAMING:
case METHODTYPE_CLIENT_STREAMING:
- return "global::System.Threading.Tasks.Task<" + GetClassName(method->output_type()) + ">";
+ return "global::System.Threading.Tasks.Task<" +
+ GetClassName(method->output_type()) + ">";
case METHODTYPE_SERVER_STREAMING:
case METHODTYPE_BIDI_STREAMING:
return "global::System.Threading.Tasks.Task";
}
- GOOGLE_LOG(FATAL)<< "Can't get here.";
+ GOOGLE_LOG(FATAL) << "Can't get here.";
return "";
}
@@ -221,18 +227,19 @@ std::string GetMethodResponseStreamMaybe(const MethodDescriptor *method) {
return "";
case METHODTYPE_SERVER_STREAMING:
case METHODTYPE_BIDI_STREAMING:
- return ", IServerStreamWriter<" + GetClassName(method->output_type())
- + "> responseStream";
+ return ", IServerStreamWriter<" + GetClassName(method->output_type()) +
+ "> responseStream";
}
- GOOGLE_LOG(FATAL)<< "Can't get here.";
+ GOOGLE_LOG(FATAL) << "Can't get here.";
return "";
}
// Gets vector of all messages used as input or output types.
-std::vector<const Descriptor*> GetUsedMessages(
+std::vector<const Descriptor *> GetUsedMessages(
const ServiceDescriptor *service) {
- std::set<const Descriptor*> descriptor_set;
- std::vector<const Descriptor*> result; // vector is to maintain stable ordering
+ std::set<const Descriptor *> descriptor_set;
+ std::vector<const Descriptor *>
+ result; // vector is to maintain stable ordering
for (int i = 0; i < service->method_count(); i++) {
const MethodDescriptor *method = service->method(i);
if (descriptor_set.find(method->input_type()) == descriptor_set.end()) {
@@ -247,21 +254,25 @@ std::vector<const Descriptor*> GetUsedMessages(
return result;
}
-void GenerateMarshallerFields(Printer* out, const ServiceDescriptor *service) {
- std::vector<const Descriptor*> used_messages = GetUsedMessages(service);
+void GenerateMarshallerFields(Printer *out, const ServiceDescriptor *service) {
+ std::vector<const Descriptor *> used_messages = GetUsedMessages(service);
for (size_t i = 0; i < used_messages.size(); i++) {
const Descriptor *message = used_messages[i];
out->Print(
- "static readonly Marshaller<$type$> $fieldname$ = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), $type$.Parser.ParseFrom);\n",
+ "static readonly Marshaller<$type$> $fieldname$ = "
+ "Marshallers.Create((arg) => "
+ "global::Google.Protobuf.MessageExtensions.ToByteArray(arg), "
+ "$type$.Parser.ParseFrom);\n",
"fieldname", GetMarshallerFieldName(message), "type",
GetClassName(message));
}
out->Print("\n");
}
-void GenerateStaticMethodField(Printer* out, const MethodDescriptor *method) {
+void GenerateStaticMethodField(Printer *out, const MethodDescriptor *method) {
out->Print(
- "static readonly Method<$request$, $response$> $fieldname$ = new Method<$request$, $response$>(\n",
+ "static readonly Method<$request$, $response$> $fieldname$ = new "
+ "Method<$request$, $response$>(\n",
"fieldname", GetMethodFieldName(method), "request",
GetClassName(method->input_type()), "response",
GetClassName(method->output_type()));
@@ -270,7 +281,7 @@ void GenerateStaticMethodField(Printer* out, const MethodDescriptor *method) {
out->Print("$methodtype$,\n", "methodtype",
GetCSharpMethodType(GetMethodType(method)));
out->Print("$servicenamefield$,\n", "servicenamefield",
- GetServiceNameFieldName());
+ GetServiceNameFieldName());
out->Print("\"$methodname$\",\n", "methodname", method->name());
out->Print("$requestmarshaller$,\n", "requestmarshaller",
GetMarshallerFieldName(method->input_type()));
@@ -281,11 +292,14 @@ void GenerateStaticMethodField(Printer* out, const MethodDescriptor *method) {
out->Outdent();
}
-void GenerateServiceDescriptorProperty(Printer* out, const ServiceDescriptor *service) {
+void GenerateServiceDescriptorProperty(Printer *out,
+ const ServiceDescriptor *service) {
std::ostringstream index;
index << service->index();
out->Print("/// <summary>Service descriptor</summary>\n");
- out->Print("public static global::Google.Protobuf.Reflection.ServiceDescriptor Descriptor\n");
+ out->Print(
+ "public static global::Google.Protobuf.Reflection.ServiceDescriptor "
+ "Descriptor\n");
out->Print("{\n");
out->Print(" get { return $umbrella$.Descriptor.Services[$index$]; }\n",
"umbrella", GetReflectionClassName(service->file()), "index",
@@ -294,9 +308,11 @@ void GenerateServiceDescriptorProperty(Printer* out, const ServiceDescriptor *se
out->Print("\n");
}
-void GenerateServerClass(Printer* out, const ServiceDescriptor *service) {
- out->Print("/// <summary>Base class for server-side implementations of $servicename$</summary>\n",
- "servicename", GetServiceClassName(service));
+void GenerateServerClass(Printer *out, const ServiceDescriptor *service) {
+ out->Print(
+ "/// <summary>Base class for server-side implementations of "
+ "$servicename$</summary>\n",
+ "servicename", GetServiceClassName(service));
out->Print("public abstract class $name$\n", "name",
GetServerClassName(service));
out->Print("{\n");
@@ -305,7 +321,8 @@ void GenerateServerClass(Printer* out, const ServiceDescriptor *service) {
const MethodDescriptor *method = service->method(i);
GenerateDocCommentBody(out, method);
out->Print(
- "public virtual $returntype$ $methodname$($request$$response_stream_maybe$, "
+ "public virtual $returntype$ "
+ "$methodname$($request$$response_stream_maybe$, "
"ServerCallContext context)\n",
"methodname", method->name(), "returntype",
GetMethodReturnTypeServer(method), "request",
@@ -313,8 +330,9 @@ void GenerateServerClass(Printer* out, const ServiceDescriptor *service) {
GetMethodResponseStreamMaybe(method));
out->Print("{\n");
out->Indent();
- out->Print("throw new RpcException("
- "new Status(StatusCode.Unimplemented, \"\"));\n");
+ out->Print(
+ "throw new RpcException("
+ "new Status(StatusCode.Unimplemented, \"\"));\n");
out->Outdent();
out->Print("}\n\n");
}
@@ -323,41 +341,49 @@ void GenerateServerClass(Printer* out, const ServiceDescriptor *service) {
out->Print("\n");
}
-void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
- out->Print("/// <summary>Client for $servicename$</summary>\n",
- "servicename", GetServiceClassName(service));
- out->Print(
- "public class $name$ : ClientBase<$name$>\n",
- "name", GetClientClassName(service));
+void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
+ out->Print("/// <summary>Client for $servicename$</summary>\n", "servicename",
+ GetServiceClassName(service));
+ out->Print("public class $name$ : ClientBase<$name$>\n", "name",
+ GetClientClassName(service));
out->Print("{\n");
out->Indent();
// constructors
- out->Print("/// <summary>Creates a new client for $servicename$</summary>\n"
- "/// <param name=\"channel\">The channel to use to make remote calls.</param>\n",
- "servicename", GetServiceClassName(service));
- out->Print("public $name$(Channel channel) : base(channel)\n",
- "name", GetClientClassName(service));
+ out->Print(
+ "/// <summary>Creates a new client for $servicename$</summary>\n"
+ "/// <param name=\"channel\">The channel to use to make remote "
+ "calls.</param>\n",
+ "servicename", GetServiceClassName(service));
+ out->Print("public $name$(Channel channel) : base(channel)\n", "name",
+ GetClientClassName(service));
out->Print("{\n");
out->Print("}\n");
- out->Print("/// <summary>Creates a new client for $servicename$ that uses a custom <c>CallInvoker</c>.</summary>\n"
- "/// <param name=\"callInvoker\">The callInvoker to use to make remote calls.</param>\n",
- "servicename", GetServiceClassName(service));
+ out->Print(
+ "/// <summary>Creates a new client for $servicename$ that uses a custom "
+ "<c>CallInvoker</c>.</summary>\n"
+ "/// <param name=\"callInvoker\">The callInvoker to use to make remote "
+ "calls.</param>\n",
+ "servicename", GetServiceClassName(service));
out->Print("public $name$(CallInvoker callInvoker) : base(callInvoker)\n",
"name", GetClientClassName(service));
out->Print("{\n");
out->Print("}\n");
- out->Print("/// <summary>Protected parameterless constructor to allow creation"
- " of test doubles.</summary>\n");
- out->Print("protected $name$() : base()\n",
- "name", GetClientClassName(service));
+ out->Print(
+ "/// <summary>Protected parameterless constructor to allow creation"
+ " of test doubles.</summary>\n");
+ out->Print("protected $name$() : base()\n", "name",
+ GetClientClassName(service));
out->Print("{\n");
out->Print("}\n");
- out->Print("/// <summary>Protected constructor to allow creation of configured clients.</summary>\n"
- "/// <param name=\"configuration\">The client configuration.</param>\n");
- out->Print("protected $name$(ClientBaseConfiguration configuration)"
- " : base(configuration)\n",
- "name", GetClientClassName(service));
+ out->Print(
+ "/// <summary>Protected constructor to allow creation of configured "
+ "clients.</summary>\n"
+ "/// <param name=\"configuration\">The client configuration.</param>\n");
+ out->Print(
+ "protected $name$(ClientBaseConfiguration configuration)"
+ " : base(configuration)\n",
+ "name", GetClientClassName(service));
out->Print("{\n");
out->Print("}\n\n");
@@ -368,27 +394,36 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
if (method_type == METHODTYPE_NO_STREAMING) {
// unary calls have an extra synchronous stub method
GenerateDocCommentBody(out, method);
- out->Print("public virtual $response$ $methodname$($request$ request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))\n",
+ out->Print(
+ "public virtual $response$ $methodname$($request$ request, Metadata "
+ "headers = null, DateTime? deadline = null, CancellationToken "
+ "cancellationToken = default(CancellationToken))\n",
"methodname", method->name(), "request",
GetClassName(method->input_type()), "response",
GetClassName(method->output_type()));
out->Print("{\n");
out->Indent();
- out->Print("return $methodname$(request, new CallOptions(headers, deadline, cancellationToken));\n",
- "methodname", method->name());
+ out->Print(
+ "return $methodname$(request, new CallOptions(headers, deadline, "
+ "cancellationToken));\n",
+ "methodname", method->name());
out->Outdent();
out->Print("}\n");
// overload taking CallOptions as a param
GenerateDocCommentBody(out, method);
- out->Print("public virtual $response$ $methodname$($request$ request, CallOptions options)\n",
+ out->Print(
+ "public virtual $response$ $methodname$($request$ request, "
+ "CallOptions options)\n",
"methodname", method->name(), "request",
GetClassName(method->input_type()), "response",
GetClassName(method->output_type()));
out->Print("{\n");
out->Indent();
- out->Print("return CallInvoker.BlockingUnaryCall($methodfield$, null, options, request);\n",
- "methodfield", GetMethodFieldName(method));
+ out->Print(
+ "return CallInvoker.BlockingUnaryCall($methodfield$, null, options, "
+ "request);\n",
+ "methodfield", GetMethodFieldName(method));
out->Outdent();
out->Print("}\n");
}
@@ -399,23 +434,28 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
}
GenerateDocCommentBody(out, method);
out->Print(
- "public virtual $returntype$ $methodname$($request_maybe$Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))\n",
- "methodname", method_name, "request_maybe",
- GetMethodRequestParamMaybe(method), "returntype",
- GetMethodReturnTypeClient(method));
+ "public virtual $returntype$ $methodname$($request_maybe$Metadata "
+ "headers = null, DateTime? deadline = null, CancellationToken "
+ "cancellationToken = default(CancellationToken))\n",
+ "methodname", method_name, "request_maybe",
+ GetMethodRequestParamMaybe(method), "returntype",
+ GetMethodReturnTypeClient(method));
out->Print("{\n");
out->Indent();
- out->Print("return $methodname$($request_maybe$new CallOptions(headers, deadline, cancellationToken));\n",
- "methodname", method_name,
- "request_maybe", GetMethodRequestParamMaybe(method, true));
+ out->Print(
+ "return $methodname$($request_maybe$new CallOptions(headers, deadline, "
+ "cancellationToken));\n",
+ "methodname", method_name, "request_maybe",
+ GetMethodRequestParamMaybe(method, true));
out->Outdent();
out->Print("}\n");
// overload taking CallOptions as a param
GenerateDocCommentBody(out, method);
out->Print(
- "public virtual $returntype$ $methodname$($request_maybe$CallOptions options)\n",
+ "public virtual $returntype$ $methodname$($request_maybe$CallOptions "
+ "options)\n",
"methodname", method_name, "request_maybe",
GetMethodRequestParamMaybe(method), "returntype",
GetMethodReturnTypeClient(method));
@@ -423,36 +463,45 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
out->Indent();
switch (GetMethodType(method)) {
case METHODTYPE_NO_STREAMING:
- out->Print("return CallInvoker.AsyncUnaryCall($methodfield$, null, options, request);\n",
- "methodfield", GetMethodFieldName(method));
+ out->Print(
+ "return CallInvoker.AsyncUnaryCall($methodfield$, null, options, "
+ "request);\n",
+ "methodfield", GetMethodFieldName(method));
break;
case METHODTYPE_CLIENT_STREAMING:
- out->Print("return CallInvoker.AsyncClientStreamingCall($methodfield$, null, options);\n",
- "methodfield", GetMethodFieldName(method));
+ out->Print(
+ "return CallInvoker.AsyncClientStreamingCall($methodfield$, null, "
+ "options);\n",
+ "methodfield", GetMethodFieldName(method));
break;
case METHODTYPE_SERVER_STREAMING:
out->Print(
- "return CallInvoker.AsyncServerStreamingCall($methodfield$, null, options, request);\n",
+ "return CallInvoker.AsyncServerStreamingCall($methodfield$, null, "
+ "options, request);\n",
"methodfield", GetMethodFieldName(method));
break;
case METHODTYPE_BIDI_STREAMING:
- out->Print("return CallInvoker.AsyncDuplexStreamingCall($methodfield$, null, options);\n",
- "methodfield", GetMethodFieldName(method));
+ out->Print(
+ "return CallInvoker.AsyncDuplexStreamingCall($methodfield$, null, "
+ "options);\n",
+ "methodfield", GetMethodFieldName(method));
break;
default:
- GOOGLE_LOG(FATAL)<< "Can't get here.";
+ GOOGLE_LOG(FATAL) << "Can't get here.";
}
out->Outdent();
out->Print("}\n");
}
// override NewInstance method
- out->Print("protected override $name$ NewInstance(ClientBaseConfiguration configuration)\n",
- "name", GetClientClassName(service));
+ out->Print(
+ "protected override $name$ NewInstance(ClientBaseConfiguration "
+ "configuration)\n",
+ "name", GetClientClassName(service));
out->Print("{\n");
out->Indent();
- out->Print("return new $name$(configuration);\n",
- "name", GetClientClassName(service));
+ out->Print("return new $name$(configuration);\n", "name",
+ GetClientClassName(service));
out->Outdent();
out->Print("}\n");
@@ -461,11 +510,13 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
out->Print("\n");
}
-void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor *service) {
+void GenerateBindServiceMethod(Printer *out, const ServiceDescriptor *service) {
out->Print(
- "/// <summary>Creates service definition that can be registered with a server</summary>\n");
+ "/// <summary>Creates service definition that can be registered with a "
+ "server</summary>\n");
out->Print(
- "public static ServerServiceDefinition BindService($implclass$ serviceImpl)\n",
+ "public static ServerServiceDefinition BindService($implclass$ "
+ "serviceImpl)\n",
"implclass", GetServerClassName(service));
out->Print("{\n");
out->Indent();
@@ -491,7 +542,7 @@ void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor *service) {
out->Print("\n");
}
-void GenerateService(Printer* out, const ServiceDescriptor *service,
+void GenerateService(Printer *out, const ServiceDescriptor *service,
bool generate_client, bool generate_server,
bool internal_access) {
GenerateDocCommentBody(out, service);
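
Editor's note: GenerateDocCommentBodyImpl above XML-escapes the proto comments and collapses runs of blank lines while preserving single blanks, since they matter to the markdown. A rough Python paraphrase of that behaviour, for illustration only; the real code emits through the protobuf Printer, and the closing tag is added here for completeness.

    def doc_comment_lines(comments):
        # Escape XML-significant characters; apostrophes are fine because the
        # text ends up inside a <summary> element, not an attribute.
        comments = comments.replace('&', '&amp;').replace('<', '&lt;')
        out = ['/// <summary>']
        last_was_empty = False
        for line in comments.split('\n'):
            if not line:
                last_was_empty = True      # remember the blank, emit it lazily
                continue
            if last_was_empty:
                out.append('///')          # a run of blanks collapses to one
            last_was_empty = False
            out.append('/// ' + line)
        out.append('/// </summary>')       # closing tag assumed for the sketch
        return out
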
diff --git a/src/compiler/csharp_generator_helpers.h b/src/compiler/csharp_generator_helpers.h
index 9bdf6fb535..f5d36f257a 100644
--- a/src/compiler/csharp_generator_helpers.h
+++ b/src/compiler/csharp_generator_helpers.h
@@ -41,14 +41,16 @@ namespace grpc_csharp_generator {
inline bool ServicesFilename(const grpc::protobuf::FileDescriptor *file,
grpc::string *file_name_or_error) {
- *file_name_or_error = grpc_generator::FileNameInUpperCamel(file, false) + "Grpc.cs";
+ *file_name_or_error =
+ grpc_generator::FileNameInUpperCamel(file, false) + "Grpc.cs";
return true;
}
// Get leading or trailing comments in a string. Comment lines start with "// ".
// Leading detached comments are put in in front of leading comments.
template <typename DescriptorType>
-inline grpc::string GetCsharpComments(const DescriptorType *desc, bool leading) {
+inline grpc::string GetCsharpComments(const DescriptorType *desc,
+ bool leading) {
return grpc_generator::GetPrefixedComments(desc, leading, "//");
}
diff --git a/src/compiler/csharp_plugin.cc b/src/compiler/csharp_plugin.cc
index 5350e73f10..7def72c54c 100644
--- a/src/compiler/csharp_plugin.cc
+++ b/src/compiler/csharp_plugin.cc
@@ -67,10 +67,8 @@ class CSharpGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
}
}
- grpc::string code = grpc_csharp_generator::GetServices(file,
- generate_client,
- generate_server,
- internal_access);
+ grpc::string code = grpc_csharp_generator::GetServices(
+ file, generate_client, generate_server, internal_access);
if (code.size() == 0) {
return true; // don't generate a file if there are no services
}
diff --git a/src/compiler/generator_helpers.h b/src/compiler/generator_helpers.h
index 9a88c2bfcc..88d96c0080 100644
--- a/src/compiler/generator_helpers.h
+++ b/src/compiler/generator_helpers.h
@@ -84,7 +84,7 @@ inline grpc::string StringReplace(grpc::string str, const grpc::string &from,
}
str.replace(pos, from.length(), to);
pos += to.length();
- } while(replace_all);
+ } while (replace_all);
return str;
}
@@ -139,8 +139,8 @@ inline grpc::string LowerUnderscoreToUpperCamel(grpc::string str) {
return result;
}
-inline grpc::string FileNameInUpperCamel(const grpc::protobuf::FileDescriptor *file,
- bool include_package_path) {
+inline grpc::string FileNameInUpperCamel(
+ const grpc::protobuf::FileDescriptor *file, bool include_package_path) {
std::vector<grpc::string> tokens = tokenize(StripProto(file->name()), "/");
grpc::string result = "";
if (include_package_path) {
@@ -152,7 +152,8 @@ inline grpc::string FileNameInUpperCamel(const grpc::protobuf::FileDescriptor *f
return result;
}
-inline grpc::string FileNameInUpperCamel(const grpc::protobuf::FileDescriptor *file) {
+inline grpc::string FileNameInUpperCamel(
+ const grpc::protobuf::FileDescriptor *file) {
return FileNameInUpperCamel(file, true);
}
@@ -163,7 +164,8 @@ enum MethodType {
METHODTYPE_BIDI_STREAMING
};
-inline MethodType GetMethodType(const grpc::protobuf::MethodDescriptor *method) {
+inline MethodType GetMethodType(
+ const grpc::protobuf::MethodDescriptor *method) {
if (method->client_streaming()) {
if (method->server_streaming()) {
return METHODTYPE_BIDI_STREAMING;
@@ -254,7 +256,7 @@ inline grpc::string GenerateCommentsWithPrefix(
const std::vector<grpc::string> &in, const grpc::string &prefix) {
std::ostringstream oss;
for (auto it = in.begin(); it != in.end(); it++) {
- const grpc::string& elem = *it;
+ const grpc::string &elem = *it;
if (elem.empty()) {
oss << prefix << "\n";
} else if (elem[0] == ' ') {
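
Editor's note: GetMethodType, reflowed above, maps the two protobuf streaming flags onto the four method kinds used throughout the generators. A tiny Python sketch of that mapping, with shortened names for illustration:

    def method_type(client_streaming, server_streaming):
        if client_streaming:
            return 'BIDI_STREAMING' if server_streaming else 'CLIENT_STREAMING'
        return 'SERVER_STREAMING' if server_streaming else 'NO_STREAMING'
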
diff --git a/src/compiler/node_generator.cc b/src/compiler/node_generator.cc
index 1fe090d17a..c3852020a3 100644
--- a/src/compiler/node_generator.cc
+++ b/src/compiler/node_generator.cc
@@ -67,15 +67,15 @@ grpc::string ModuleAlias(const grpc::string filename) {
// Given a filename like foo/bar/baz.proto, returns the corresponding JavaScript
// message file foo/bar/baz.js
-grpc::string GetJSMessageFilename(const grpc::string& filename) {
+grpc::string GetJSMessageFilename(const grpc::string &filename) {
grpc::string name = filename;
return grpc_generator::StripProto(name) + "_pb.js";
}
// Given a filename like foo/bar/baz.proto, returns the root directory
// path ../../
-grpc::string GetRootPath(const grpc::string& from_filename,
- const grpc::string& to_filename) {
+grpc::string GetRootPath(const grpc::string &from_filename,
+ const grpc::string &to_filename) {
if (to_filename.find("google/protobuf") == 0) {
// Well-known types (.proto files in the google/protobuf directory) are
// assumed to come from the 'google-protobuf' npm package. We may want to
@@ -96,21 +96,24 @@ grpc::string GetRootPath(const grpc::string& from_filename,
// Return the relative path to load to_file from the directory containing
// from_file, assuming that both paths are relative to the same directory
-grpc::string GetRelativePath(const grpc::string& from_file,
- const grpc::string& to_file) {
+grpc::string GetRelativePath(const grpc::string &from_file,
+ const grpc::string &to_file) {
return GetRootPath(from_file, to_file) + to_file;
}
/* Finds all message types used in all services in the file, and returns them
* as a map of fully qualified message type name to message descriptor */
-map<grpc::string, const Descriptor*> GetAllMessages(const FileDescriptor *file) {
- map<grpc::string, const Descriptor*> message_types;
- for (int service_num = 0; service_num < file->service_count(); service_num++) {
- const ServiceDescriptor* service = file->service(service_num);
- for (int method_num = 0; method_num < service->method_count(); method_num++) {
- const MethodDescriptor* method = service->method(method_num);
- const Descriptor* input_type = method->input_type();
- const Descriptor* output_type = method->output_type();
+map<grpc::string, const Descriptor *> GetAllMessages(
+ const FileDescriptor *file) {
+ map<grpc::string, const Descriptor *> message_types;
+ for (int service_num = 0; service_num < file->service_count();
+ service_num++) {
+ const ServiceDescriptor *service = file->service(service_num);
+ for (int method_num = 0; method_num < service->method_count();
+ method_num++) {
+ const MethodDescriptor *method = service->method(method_num);
+ const Descriptor *input_type = method->input_type();
+ const Descriptor *output_type = method->output_type();
message_types[input_type->name()] = input_type;
message_types[output_type->name()] = output_type;
}
@@ -118,7 +121,7 @@ map<grpc::string, const Descriptor*> GetAllMessages(const FileDescriptor *file)
return message_types;
}
-grpc::string MessageIdentifierName(const grpc::string& name) {
+grpc::string MessageIdentifierName(const grpc::string &name) {
return grpc_generator::StringReplace(name, ".", "_");
}
@@ -194,18 +197,18 @@ void PrintService(const ServiceDescriptor *service, Printer *out) {
out->Print(template_vars, "var $name$Service = exports.$name$Service = {\n");
out->Indent();
for (int i = 0; i < service->method_count(); i++) {
- grpc::string method_name = grpc_generator::LowercaseFirstLetter(
- service->method(i)->name());
+ grpc::string method_name =
+ grpc_generator::LowercaseFirstLetter(service->method(i)->name());
out->Print(GetNodeComments(service->method(i), true).c_str());
- out->Print("$method_name$: ",
- "method_name", method_name);
+ out->Print("$method_name$: ", "method_name", method_name);
PrintMethod(service->method(i), out);
out->Print(",\n");
out->Print(GetNodeComments(service->method(i), false).c_str());
}
out->Outdent();
out->Print("};\n\n");
- out->Print(template_vars, "exports.$name$Client = "
+ out->Print(template_vars,
+ "exports.$name$Client = "
"grpc.makeGenericClientConstructor($name$Service);\n");
out->Print(GetNodeComments(service, false).c_str());
}
@@ -213,27 +216,25 @@ void PrintService(const ServiceDescriptor *service, Printer *out) {
void PrintImports(const FileDescriptor *file, Printer *out) {
out->Print("var grpc = require('grpc');\n");
if (file->message_type_count() > 0) {
- grpc::string file_path = GetRelativePath(file->name(),
- GetJSMessageFilename(
- file->name()));
- out->Print("var $module_alias$ = require('$file_path$');\n",
- "module_alias", ModuleAlias(file->name()),
- "file_path", file_path);
+ grpc::string file_path =
+ GetRelativePath(file->name(), GetJSMessageFilename(file->name()));
+ out->Print("var $module_alias$ = require('$file_path$');\n", "module_alias",
+ ModuleAlias(file->name()), "file_path", file_path);
}
for (int i = 0; i < file->dependency_count(); i++) {
grpc::string file_path = GetRelativePath(
file->name(), GetJSMessageFilename(file->dependency(i)->name()));
- out->Print("var $module_alias$ = require('$file_path$');\n",
- "module_alias", ModuleAlias(file->dependency(i)->name()),
- "file_path", file_path);
+ out->Print("var $module_alias$ = require('$file_path$');\n", "module_alias",
+ ModuleAlias(file->dependency(i)->name()), "file_path",
+ file_path);
}
out->Print("\n");
}
void PrintTransformers(const FileDescriptor *file, Printer *out) {
- map<grpc::string, const Descriptor*> messages = GetAllMessages(file);
- for (std::map<grpc::string, const Descriptor*>::iterator it =
+ map<grpc::string, const Descriptor *> messages = GetAllMessages(file);
+ for (std::map<grpc::string, const Descriptor *>::iterator it =
messages.begin();
it != messages.end(); it++) {
PrintMessageTransformer(it->second, out);
@@ -246,7 +247,6 @@ void PrintServices(const FileDescriptor *file, Printer *out) {
PrintService(file->service(i), out);
}
}
-
}
grpc::string GenerateFile(const FileDescriptor *file) {
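
Editor's note: GetRootPath/GetRelativePath above compute the '../'-style prefix a generated service file needs to require its companion _pb.js message file. A hedged Python sketch of the path arithmetic; the same-directory './' case is an assumption here, and the google/protobuf well-known-types special case shown in the hunk is omitted.

    def get_root_path(from_filename):
        # foo/bar/baz.proto -> '../../'
        depth = from_filename.count('/')
        return '../' * depth if depth else './'

    def get_relative_path(from_file, to_file):
        return get_root_path(from_file) + to_file

    # get_relative_path('foo/bar/baz.proto', 'foo/bar/baz_pb.js')
    #   -> '../../foo/bar/baz_pb.js'
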
diff --git a/src/compiler/node_generator_helpers.h b/src/compiler/node_generator_helpers.h
index 5862772841..efe94ab00d 100644
--- a/src/compiler/node_generator_helpers.h
+++ b/src/compiler/node_generator_helpers.h
@@ -48,7 +48,7 @@ inline grpc::string GetJSServiceFilename(const grpc::string& filename) {
// Get leading or trailing comments in a string. Comment lines start with "// ".
// Leading detached comments are put in in front of leading comments.
template <typename DescriptorType>
-inline grpc::string GetNodeComments(const DescriptorType *desc, bool leading) {
+inline grpc::string GetNodeComments(const DescriptorType* desc, bool leading) {
return grpc_generator::GetPrefixedComments(desc, leading, "//");
}
diff --git a/src/compiler/objective_c_generator.cc b/src/compiler/objective_c_generator.cc
index 4be8cb4187..1d7faf120d 100644
--- a/src/compiler/objective_c_generator.cc
+++ b/src/compiler/objective_c_generator.cc
@@ -49,9 +49,9 @@ using ::std::map;
namespace grpc_objective_c_generator {
namespace {
-void PrintProtoRpcDeclarationAsPragma(Printer *printer,
- const MethodDescriptor *method,
- map< ::grpc::string, ::grpc::string> vars) {
+void PrintProtoRpcDeclarationAsPragma(
+ Printer *printer, const MethodDescriptor *method,
+ map< ::grpc::string, ::grpc::string> vars) {
vars["client_stream"] = method->client_streaming() ? "stream " : "";
vars["server_stream"] = method->server_streaming() ? "stream " : "";
@@ -61,7 +61,7 @@ void PrintProtoRpcDeclarationAsPragma(Printer *printer,
}
template <typename DescriptorType>
-static void PrintAllComments(const DescriptorType* desc, Printer* printer) {
+static void PrintAllComments(const DescriptorType *desc, Printer *printer) {
std::vector<grpc::string> comments;
grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_LEADING_DETACHED,
&comments);
@@ -100,7 +100,8 @@ void PrintMethodSignature(Printer *printer, const MethodDescriptor *method,
if (method->server_streaming()) {
printer->Print(vars,
" eventHandler:(void(^)(BOOL done, "
- "$response_class$ *_Nullable response, NSError *_Nullable error))eventHandler");
+ "$response_class$ *_Nullable response, NSError *_Nullable "
+ "error))eventHandler");
} else {
printer->Print(vars,
" handler:(void(^)($response_class$ *_Nullable response, "
@@ -123,7 +124,8 @@ void PrintAdvancedSignature(Printer *printer, const MethodDescriptor *method,
PrintMethodSignature(printer, method, vars);
}
-inline map< ::grpc::string, ::grpc::string> GetMethodVars(const MethodDescriptor *method) {
+inline map< ::grpc::string, ::grpc::string> GetMethodVars(
+ const MethodDescriptor *method) {
map< ::grpc::string, ::grpc::string> res;
res["method_name"] = method->name();
res["request_type"] = method->input_type()->name();
@@ -210,7 +212,8 @@ void PrintMethodImplementations(Printer *printer,
grpc::protobuf::io::StringOutputStream output_stream(&output);
Printer printer(&output_stream, '$');
- map< ::grpc::string, ::grpc::string> vars = {{"service_class", ServiceClassName(service)}};
+ map< ::grpc::string, ::grpc::string> vars = {
+ {"service_class", ServiceClassName(service)}};
printer.Print(vars, "@protocol $service_class$ <NSObject>\n\n");
@@ -237,21 +240,23 @@ void PrintMethodImplementations(Printer *printer,
}
::grpc::string GetSource(const ServiceDescriptor *service) {
- ::grpc::string output;
+ ::grpc::string output;
{
// Scope the output stream so it closes and finalizes output to the string.
grpc::protobuf::io::StringOutputStream output_stream(&output);
Printer printer(&output_stream, '$');
- map< ::grpc::string,::grpc::string> vars = {{"service_name", service->name()},
- {"service_class", ServiceClassName(service)},
- {"package", service->file()->package()}};
+ map< ::grpc::string, ::grpc::string> vars = {
+ {"service_name", service->name()},
+ {"service_class", ServiceClassName(service)},
+ {"package", service->file()->package()}};
printer.Print(vars, "@implementation $service_class$\n\n");
printer.Print("// Designated initializer\n");
printer.Print("- (instancetype)initWithHost:(NSString *)host {\n");
- printer.Print(vars,
+ printer.Print(
+ vars,
" return (self = [super initWithHost:host"
" packageName:@\"$package$\" serviceName:@\"$service_name$\"]);\n");
printer.Print("}\n\n");
diff --git a/src/compiler/objective_c_generator_helpers.h b/src/compiler/objective_c_generator_helpers.h
index 1f8c80014f..b482f028a1 100644
--- a/src/compiler/objective_c_generator_helpers.h
+++ b/src/compiler/objective_c_generator_helpers.h
@@ -53,6 +53,5 @@ inline string ServiceClassName(const ServiceDescriptor *service) {
string prefix = file->options().objc_class_prefix();
return prefix + service->name();
}
-
}
#endif // GRPC_INTERNAL_COMPILER_OBJECTIVE_C_GENERATOR_HELPERS_H
diff --git a/src/compiler/objective_c_plugin.cc b/src/compiler/objective_c_plugin.cc
index be64776402..8de0997ebe 100644
--- a/src/compiler/objective_c_plugin.cc
+++ b/src/compiler/objective_c_plugin.cc
@@ -42,7 +42,8 @@
#include <google/protobuf/compiler/objectivec/objectivec_helpers.h>
using ::google::protobuf::compiler::objectivec::ProtobufLibraryFrameworkName;
-using ::google::protobuf::compiler::objectivec::IsProtobufLibraryBundledProtoFile;
+using ::google::protobuf::compiler::objectivec::
+ IsProtobufLibraryBundledProtoFile;
class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
public:
@@ -53,7 +54,6 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
const ::grpc::string &parameter,
grpc::protobuf::compiler::GeneratorContext *context,
::grpc::string *error) const {
-
if (file->service_count() == 0) {
// No services. Do nothing.
return true;
@@ -66,29 +66,32 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
// Generate .pbrpc.h
::grpc::string imports = ::grpc::string("#import \"") + file_name +
- ".pbobjc.h\"\n\n"
- "#import <ProtoRPC/ProtoService.h>\n"
- "#import <RxLibrary/GRXWriteable.h>\n"
- "#import <RxLibrary/GRXWriter.h>\n";
+ ".pbobjc.h\"\n\n"
+ "#import <ProtoRPC/ProtoService.h>\n"
+ "#import <RxLibrary/GRXWriteable.h>\n"
+ "#import <RxLibrary/GRXWriter.h>\n";
// TODO(jcanizales): Instead forward-declare the input and output types
// and import the files in the .pbrpc.m
::grpc::string proto_imports;
for (int i = 0; i < file->dependency_count(); i++) {
- ::grpc::string header = grpc_objective_c_generator::MessageHeaderName(
- file->dependency(i));
+ ::grpc::string header =
+ grpc_objective_c_generator::MessageHeaderName(file->dependency(i));
const grpc::protobuf::FileDescriptor *dependency = file->dependency(i);
if (IsProtobufLibraryBundledProtoFile(dependency)) {
::grpc::string base_name = header;
grpc_generator::StripPrefix(&base_name, "google/protobuf/");
// create the import code snippet
proto_imports +=
- "#if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS\n"
- " #import <" + ::grpc::string(ProtobufLibraryFrameworkName) +
- "/" + base_name + ">\n"
- "#else\n"
- " #import \"" + header + "\"\n"
- "#endif\n";
+ "#if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS\n"
+ " #import <" +
+ ::grpc::string(ProtobufLibraryFrameworkName) + "/" + base_name +
+ ">\n"
+ "#else\n"
+ " #import \"" +
+ header +
+ "\"\n"
+ "#endif\n";
} else {
proto_imports += ::grpc::string("#import \"") + header + "\"\n";
}
@@ -100,21 +103,22 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
declarations += grpc_objective_c_generator::GetHeader(service);
}
- static const ::grpc::string kNonNullBegin = "\nNS_ASSUME_NONNULL_BEGIN\n\n";
+ static const ::grpc::string kNonNullBegin =
+ "\nNS_ASSUME_NONNULL_BEGIN\n\n";
static const ::grpc::string kNonNullEnd = "\nNS_ASSUME_NONNULL_END\n";
- Write(context, file_name + ".pbrpc.h",
- imports + '\n' + proto_imports + '\n' + kNonNullBegin +
- declarations + kNonNullEnd);
+ Write(context, file_name + ".pbrpc.h", imports + '\n' + proto_imports +
+ '\n' + kNonNullBegin +
+ declarations + kNonNullEnd);
}
{
// Generate .pbrpc.m
::grpc::string imports = ::grpc::string("#import \"") + file_name +
- ".pbrpc.h\"\n\n"
- "#import <ProtoRPC/ProtoRPC.h>\n"
- "#import <RxLibrary/GRXWriter+Immediate.h>\n";
+ ".pbrpc.h\"\n\n"
+ "#import <ProtoRPC/ProtoRPC.h>\n"
+ "#import <RxLibrary/GRXWriter+Immediate.h>\n";
::grpc::string definitions;
for (int i = 0; i < file->service_count(); i++) {
@@ -131,7 +135,7 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
private:
// Write the given code into the given file.
void Write(grpc::protobuf::compiler::GeneratorContext *context,
- const ::grpc::string &filename, const ::grpc::string &code) const {
+ const ::grpc::string &filename, const ::grpc::string &code) const {
std::unique_ptr<grpc::protobuf::io::ZeroCopyOutputStream> output(
context->Open(filename));
grpc::protobuf::io::CodedOutputStream coded_out(output.get());
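Illustrative sketch (not part of the commit above): the reflowed concatenation in objective_c_plugin.cc assembles a framework-aware import for each bundled protobuf dependency. A standalone C++ approximation, with a hypothetical helper name, looks like this:

#include <string>

// Builds the same "#if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS" snippet the plugin
// emits; BundledProtoImport and its arguments are placeholders for this sketch.
std::string BundledProtoImport(const std::string &framework,
                               const std::string &base_name,
                               const std::string &header) {
  return "#if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS\n"
         "  #import <" + framework + "/" + base_name + ">\n"
         "#else\n"
         "  #import \"" + header + "\"\n"
         "#endif\n";
}

// e.g. BundledProtoImport("Protobuf", "Any.pbobjc.h", "google/protobuf/Any.pbobjc.h")
// would yield the conditional import block for a hypothetical bundled dependency.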
diff --git a/src/compiler/python_generator.cc b/src/compiler/python_generator.cc
index c5f1ed8061..0f61b1fb6c 100644
--- a/src/compiler/python_generator.cc
+++ b/src/compiler/python_generator.cc
@@ -74,16 +74,18 @@ PythonGrpcGenerator::PythonGrpcGenerator(const GeneratorConfiguration& config)
PythonGrpcGenerator::~PythonGrpcGenerator() {}
-bool PythonGrpcGenerator::Generate(
- const FileDescriptor* file, const grpc::string& parameter,
- GeneratorContext* context, grpc::string* error) const {
+bool PythonGrpcGenerator::Generate(const FileDescriptor* file,
+ const grpc::string& parameter,
+ GeneratorContext* context,
+ grpc::string* error) const {
// Get output file name.
grpc::string file_name;
static const int proto_suffix_length = strlen(".proto");
if (file->name().size() > static_cast<size_t>(proto_suffix_length) &&
file->name().find_last_of(".proto") == file->name().size() - 1) {
- file_name = file->name().substr(
- 0, file->name().size() - proto_suffix_length) + "_pb2.py";
+ file_name =
+ file->name().substr(0, file->name().size() - proto_suffix_length) +
+ "_pb2.py";
} else {
*error = "Invalid proto file name. Proto file must end with .proto";
return false;
@@ -115,7 +117,7 @@ map<grpc::string, grpc::string> ListToDict(
assert(values.size() % 2 == 0);
map<grpc::string, grpc::string> value_map;
auto value_iter = values.begin();
- for (unsigned i = 0; i < values.size()/2; ++i) {
+ for (unsigned i = 0; i < values.size() / 2; ++i) {
grpc::string key = *value_iter;
++value_iter;
grpc::string value = *value_iter;
@@ -138,9 +140,7 @@ class IndentScope {
printer_->Indent();
}
- ~IndentScope() {
- printer_->Outdent();
- }
+ ~IndentScope() { printer_->Outdent(); }
private:
Printer* printer_;
@@ -173,7 +173,6 @@ grpc::string ModuleAlias(const grpc::string& filename) {
return module_name;
}
-
bool GetModuleAndMessagePath(const Descriptor* type,
const ServiceDescriptor* service,
grpc::string* out) {
@@ -182,7 +181,7 @@ bool GetModuleAndMessagePath(const Descriptor* type,
do {
message_path.push_back(path_elem_type);
path_elem_type = path_elem_type->containing_type();
- } while (path_elem_type); // implicit nullptr comparison; don't be explicit
+ } while (path_elem_type); // implicit nullptr comparison; don't be explicit
grpc::string file_name = type->file()->name();
static const int proto_suffix_length = strlen(".proto");
if (!(file_name.size() > static_cast<size_t>(proto_suffix_length) &&
@@ -190,11 +189,11 @@ bool GetModuleAndMessagePath(const Descriptor* type,
return false;
}
grpc::string service_file_name = service->file()->name();
- grpc::string module = service_file_name == file_name ?
- "" : ModuleAlias(file_name) + ".";
+ grpc::string module =
+ service_file_name == file_name ? "" : ModuleAlias(file_name) + ".";
grpc::string message_type;
- for (auto path_iter = message_path.rbegin();
- path_iter != message_path.rend(); ++path_iter) {
+ for (auto path_iter = message_path.rbegin(); path_iter != message_path.rend();
+ ++path_iter) {
message_type += (*path_iter)->name() + ".";
}
// no pop_back prior to C++11
@@ -229,8 +228,7 @@ static void PrintAllComments(const DescriptorType* desc, Printer* printer) {
printer->Print("\"\"\"\n");
}
-bool PrintBetaServicer(const ServiceDescriptor* service,
- Printer* out) {
+bool PrintBetaServicer(const ServiceDescriptor* service, Printer* out) {
out->Print("\n\n");
out->Print("class Beta$Service$Servicer(object):\n", "Service",
service->name());
@@ -239,10 +237,10 @@ bool PrintBetaServicer(const ServiceDescriptor* service,
PrintAllComments(service, out);
for (int i = 0; i < service->method_count(); ++i) {
auto meth = service->method(i);
- grpc::string arg_name = meth->client_streaming() ?
- "request_iterator" : "request";
- out->Print("def $Method$(self, $ArgName$, context):\n",
- "Method", meth->name(), "ArgName", arg_name);
+ grpc::string arg_name =
+ meth->client_streaming() ? "request_iterator" : "request";
+ out->Print("def $Method$(self, $ArgName$, context):\n", "Method",
+ meth->name(), "ArgName", arg_name);
{
IndentScope raii_method_indent(out);
PrintAllComments(meth, out);
@@ -253,8 +251,7 @@ bool PrintBetaServicer(const ServiceDescriptor* service,
return true;
}
-bool PrintBetaStub(const ServiceDescriptor* service,
- Printer* out) {
+bool PrintBetaStub(const ServiceDescriptor* service, Printer* out) {
out->Print("\n\n");
out->Print("class Beta$Service$Stub(object):\n", "Service", service->name());
{
@@ -262,10 +259,12 @@ bool PrintBetaStub(const ServiceDescriptor* service,
PrintAllComments(service, out);
for (int i = 0; i < service->method_count(); ++i) {
const MethodDescriptor* meth = service->method(i);
- grpc::string arg_name = meth->client_streaming() ?
- "request_iterator" : "request";
+ grpc::string arg_name =
+ meth->client_streaming() ? "request_iterator" : "request";
auto methdict = ListToDict({"Method", meth->name(), "ArgName", arg_name});
- out->Print(methdict, "def $Method$(self, $ArgName$, timeout, metadata=None, with_call=False, protocol_options=None):\n");
+ out->Print(methdict,
+ "def $Method$(self, $ArgName$, timeout, metadata=None, "
+ "with_call=False, protocol_options=None):\n");
{
IndentScope raii_method_indent(out);
PrintAllComments(meth, out);
@@ -282,9 +281,10 @@ bool PrintBetaStub(const ServiceDescriptor* service,
bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name,
const ServiceDescriptor* service, Printer* out) {
out->Print("\n\n");
- out->Print("def beta_create_$Service$_server(servicer, pool=None, "
- "pool_size=None, default_timeout=None, maximum_timeout=None):\n",
- "Service", service->name());
+ out->Print(
+ "def beta_create_$Service$_server(servicer, pool=None, "
+ "pool_size=None, default_timeout=None, maximum_timeout=None):\n",
+ "Service", service->name());
{
IndentScope raii_create_server_indent(out);
map<grpc::string, grpc::string> method_implementation_constructors;
@@ -315,58 +315,62 @@ bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name,
}
out->Print("request_deserializers = {\n");
for (auto name_and_input_module_class_pair =
- input_message_modules_and_classes.begin();
+ input_message_modules_and_classes.begin();
name_and_input_module_class_pair !=
- input_message_modules_and_classes.end();
+ input_message_modules_and_classes.end();
name_and_input_module_class_pair++) {
IndentScope raii_indent(out);
- out->Print("(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
- "$InputTypeModuleAndClass$.FromString,\n",
- "PackageQualifiedServiceName", package_qualified_service_name,
- "MethodName", name_and_input_module_class_pair->first,
- "InputTypeModuleAndClass",
- name_and_input_module_class_pair->second);
+ out->Print(
+ "(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
+ "$InputTypeModuleAndClass$.FromString,\n",
+ "PackageQualifiedServiceName", package_qualified_service_name,
+ "MethodName", name_and_input_module_class_pair->first,
+ "InputTypeModuleAndClass", name_and_input_module_class_pair->second);
}
out->Print("}\n");
out->Print("response_serializers = {\n");
for (auto name_and_output_module_class_pair =
- output_message_modules_and_classes.begin();
+ output_message_modules_and_classes.begin();
name_and_output_module_class_pair !=
- output_message_modules_and_classes.end();
+ output_message_modules_and_classes.end();
name_and_output_module_class_pair++) {
IndentScope raii_indent(out);
- out->Print("(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
- "$OutputTypeModuleAndClass$.SerializeToString,\n",
- "PackageQualifiedServiceName", package_qualified_service_name,
- "MethodName", name_and_output_module_class_pair->first,
- "OutputTypeModuleAndClass",
- name_and_output_module_class_pair->second);
+ out->Print(
+ "(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
+ "$OutputTypeModuleAndClass$.SerializeToString,\n",
+ "PackageQualifiedServiceName", package_qualified_service_name,
+ "MethodName", name_and_output_module_class_pair->first,
+ "OutputTypeModuleAndClass",
+ name_and_output_module_class_pair->second);
}
out->Print("}\n");
out->Print("method_implementations = {\n");
for (auto name_and_implementation_constructor =
- method_implementation_constructors.begin();
- name_and_implementation_constructor !=
- method_implementation_constructors.end();
- name_and_implementation_constructor++) {
+ method_implementation_constructors.begin();
+ name_and_implementation_constructor !=
+ method_implementation_constructors.end();
+ name_and_implementation_constructor++) {
IndentScope raii_descriptions_indent(out);
const grpc::string method_name =
name_and_implementation_constructor->first;
- out->Print("(\'$PackageQualifiedServiceName$\', \'$Method$\'): "
- "face_utilities.$Constructor$(servicer.$Method$),\n",
- "PackageQualifiedServiceName", package_qualified_service_name,
- "Method", name_and_implementation_constructor->first,
- "Constructor", name_and_implementation_constructor->second);
+ out->Print(
+ "(\'$PackageQualifiedServiceName$\', \'$Method$\'): "
+ "face_utilities.$Constructor$(servicer.$Method$),\n",
+ "PackageQualifiedServiceName", package_qualified_service_name,
+ "Method", name_and_implementation_constructor->first, "Constructor",
+ name_and_implementation_constructor->second);
}
out->Print("}\n");
- out->Print("server_options = beta_implementations.server_options("
- "request_deserializers=request_deserializers, "
- "response_serializers=response_serializers, "
- "thread_pool=pool, thread_pool_size=pool_size, "
- "default_timeout=default_timeout, "
- "maximum_timeout=maximum_timeout)\n");
- out->Print("return beta_implementations.server(method_implementations, "
- "options=server_options)\n");
+ out->Print(
+ "server_options = beta_implementations.server_options("
+ "request_deserializers=request_deserializers, "
+ "response_serializers=response_serializers, "
+ "thread_pool=pool, thread_pool_size=pool_size, "
+ "default_timeout=default_timeout, "
+ "maximum_timeout=maximum_timeout)\n");
+ out->Print(
+ "return beta_implementations.server(method_implementations, "
+ "options=server_options)\n");
}
return true;
}
@@ -374,10 +378,11 @@ bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name,
bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name,
const ServiceDescriptor* service, Printer* out) {
map<grpc::string, grpc::string> dict = ListToDict({
- "Service", service->name(),
- });
+ "Service", service->name(),
+ });
out->Print("\n\n");
- out->Print(dict, "def beta_create_$Service$_stub(channel, host=None,"
+ out->Print(dict,
+ "def beta_create_$Service$_stub(channel, host=None,"
" metadata_transformer=None, pool=None, pool_size=None):\n");
{
IndentScope raii_create_server_indent(out);
@@ -387,8 +392,7 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name,
for (int i = 0; i < service->method_count(); ++i) {
const MethodDescriptor* method = service->method(i);
const grpc::string method_cardinality =
- grpc::string(method->client_streaming() ? "STREAM" : "UNARY") +
- "_" +
+ grpc::string(method->client_streaming() ? "STREAM" : "UNARY") + "_" +
grpc::string(method->server_streaming() ? "STREAM" : "UNARY");
grpc::string input_message_module_and_class;
if (!GetModuleAndMessagePath(method->input_type(), service,
@@ -409,32 +413,33 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name,
}
out->Print("request_serializers = {\n");
for (auto name_and_input_module_class_pair =
- input_message_modules_and_classes.begin();
+ input_message_modules_and_classes.begin();
name_and_input_module_class_pair !=
- input_message_modules_and_classes.end();
+ input_message_modules_and_classes.end();
name_and_input_module_class_pair++) {
IndentScope raii_indent(out);
- out->Print("(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
- "$InputTypeModuleAndClass$.SerializeToString,\n",
- "PackageQualifiedServiceName", package_qualified_service_name,
- "MethodName", name_and_input_module_class_pair->first,
- "InputTypeModuleAndClass",
- name_and_input_module_class_pair->second);
+ out->Print(
+ "(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
+ "$InputTypeModuleAndClass$.SerializeToString,\n",
+ "PackageQualifiedServiceName", package_qualified_service_name,
+ "MethodName", name_and_input_module_class_pair->first,
+ "InputTypeModuleAndClass", name_and_input_module_class_pair->second);
}
out->Print("}\n");
out->Print("response_deserializers = {\n");
for (auto name_and_output_module_class_pair =
- output_message_modules_and_classes.begin();
+ output_message_modules_and_classes.begin();
name_and_output_module_class_pair !=
- output_message_modules_and_classes.end();
+ output_message_modules_and_classes.end();
name_and_output_module_class_pair++) {
IndentScope raii_indent(out);
- out->Print("(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
- "$OutputTypeModuleAndClass$.FromString,\n",
- "PackageQualifiedServiceName", package_qualified_service_name,
- "MethodName", name_and_output_module_class_pair->first,
- "OutputTypeModuleAndClass",
- name_and_output_module_class_pair->second);
+ out->Print(
+ "(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
+ "$OutputTypeModuleAndClass$.FromString,\n",
+ "PackageQualifiedServiceName", package_qualified_service_name,
+ "MethodName", name_and_output_module_class_pair->first,
+ "OutputTypeModuleAndClass",
+ name_and_output_module_class_pair->second);
}
out->Print("}\n");
out->Print("cardinalities = {\n");
@@ -443,17 +448,19 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name,
name_and_cardinality++) {
IndentScope raii_descriptions_indent(out);
out->Print("\'$Method$\': cardinality.Cardinality.$Cardinality$,\n",
- "Method", name_and_cardinality->first,
- "Cardinality", name_and_cardinality->second);
+ "Method", name_and_cardinality->first, "Cardinality",
+ name_and_cardinality->second);
}
out->Print("}\n");
- out->Print("stub_options = beta_implementations.stub_options("
- "host=host, metadata_transformer=metadata_transformer, "
- "request_serializers=request_serializers, "
- "response_deserializers=response_deserializers, "
- "thread_pool=pool, thread_pool_size=pool_size)\n");
out->Print(
- "return beta_implementations.dynamic_stub(channel, \'$PackageQualifiedServiceName$\', "
+ "stub_options = beta_implementations.stub_options("
+ "host=host, metadata_transformer=metadata_transformer, "
+ "request_serializers=request_serializers, "
+ "response_deserializers=response_deserializers, "
+ "thread_pool=pool, thread_pool_size=pool_size)\n");
+ out->Print(
+ "return beta_implementations.dynamic_stub(channel, "
+ "\'$PackageQualifiedServiceName$\', "
"cardinalities, options=stub_options)\n",
"PackageQualifiedServiceName", package_qualified_service_name);
}
@@ -476,43 +483,41 @@ bool PrintStub(const grpc::string& package_qualified_service_name,
out->Print("Args:\n");
{
IndentScope raii_args_indent(out);
- out->Print("channel: A grpc.Channel.\n");
+ out->Print("channel: A grpc.Channel.\n");
}
out->Print("\"\"\"\n");
for (int i = 0; i < service->method_count(); ++i) {
auto method = service->method(i);
- auto multi_callable_constructor =
- grpc::string(method->client_streaming() ? "stream" : "unary") +
- "_" +
- grpc::string(method->server_streaming() ? "stream" : "unary");
- grpc::string request_module_and_class;
- if (!GetModuleAndMessagePath(method->input_type(), service,
- &request_module_and_class)) {
- return false;
- }
- grpc::string response_module_and_class;
- if (!GetModuleAndMessagePath(method->output_type(), service,
- &response_module_and_class)) {
+ auto multi_callable_constructor =
+ grpc::string(method->client_streaming() ? "stream" : "unary") +
+ "_" + grpc::string(method->server_streaming() ? "stream" : "unary");
+ grpc::string request_module_and_class;
+ if (!GetModuleAndMessagePath(method->input_type(), service,
+ &request_module_and_class)) {
+ return false;
+ }
+ grpc::string response_module_and_class;
+ if (!GetModuleAndMessagePath(method->output_type(), service,
+ &response_module_and_class)) {
return false;
- }
- out->Print("self.$Method$ = channel.$MultiCallableConstructor$(\n",
- "Method", method->name(),
- "MultiCallableConstructor", multi_callable_constructor);
- {
+ }
+ out->Print("self.$Method$ = channel.$MultiCallableConstructor$(\n",
+ "Method", method->name(), "MultiCallableConstructor",
+ multi_callable_constructor);
+ {
IndentScope raii_first_attribute_indent(out);
IndentScope raii_second_attribute_indent(out);
- out->Print(
- "'/$PackageQualifiedService$/$Method$',\n",
- "PackageQualifiedService", package_qualified_service_name,
- "Method", method->name());
- out->Print(
- "request_serializer=$RequestModuleAndClass$.SerializeToString,\n",
- "RequestModuleAndClass", request_module_and_class);
- out->Print(
+ out->Print("'/$PackageQualifiedService$/$Method$',\n",
+ "PackageQualifiedService", package_qualified_service_name,
+ "Method", method->name());
+ out->Print(
+ "request_serializer=$RequestModuleAndClass$.SerializeToString,\n",
+ "RequestModuleAndClass", request_module_and_class);
+ out->Print(
"response_deserializer=$ResponseModuleAndClass$.FromString,\n",
- "ResponseModuleAndClass", response_module_and_class);
- out->Print(")\n");
- }
+ "ResponseModuleAndClass", response_module_and_class);
+ out->Print(")\n");
+ }
}
}
}
@@ -527,11 +532,11 @@ bool PrintServicer(const ServiceDescriptor* service, Printer* out) {
PrintAllComments(service, out);
for (int i = 0; i < service->method_count(); ++i) {
auto method = service->method(i);
- grpc::string arg_name = method->client_streaming() ?
- "request_iterator" : "request";
+ grpc::string arg_name =
+ method->client_streaming() ? "request_iterator" : "request";
out->Print("\n");
- out->Print("def $Method$(self, $ArgName$, context):\n",
- "Method", method->name(), "ArgName", arg_name);
+ out->Print("def $Method$(self, $ArgName$, context):\n", "Method",
+ method->name(), "ArgName", arg_name);
{
IndentScope raii_method_indent(out);
PrintAllComments(method, out);
@@ -544,11 +549,12 @@ bool PrintServicer(const ServiceDescriptor* service, Printer* out) {
return true;
}
-bool PrintAddServicerToServer(const grpc::string& package_qualified_service_name,
- const ServiceDescriptor* service, Printer* out) {
+bool PrintAddServicerToServer(
+ const grpc::string& package_qualified_service_name,
+ const ServiceDescriptor* service, Printer* out) {
out->Print("\n\n");
out->Print("def add_$Service$Servicer_to_server(servicer, server):\n",
- "Service", service->name());
+ "Service", service->name());
{
IndentScope raii_class_indent(out);
out->Print("rpc_method_handlers = {\n");
@@ -557,34 +563,37 @@ bool PrintAddServicerToServer(const grpc::string& package_qualified_service_name
IndentScope raii_dict_second_indent(out);
for (int i = 0; i < service->method_count(); ++i) {
auto method = service->method(i);
- auto method_handler_constructor =
+ auto method_handler_constructor =
grpc::string(method->client_streaming() ? "stream" : "unary") +
- "_" +
+ "_" +
grpc::string(method->server_streaming() ? "stream" : "unary") +
"_rpc_method_handler";
- grpc::string request_module_and_class;
- if (!GetModuleAndMessagePath(method->input_type(), service,
- &request_module_and_class)) {
- return false;
- }
- grpc::string response_module_and_class;
- if (!GetModuleAndMessagePath(method->output_type(), service,
- &response_module_and_class)) {
+ grpc::string request_module_and_class;
+ if (!GetModuleAndMessagePath(method->input_type(), service,
+ &request_module_and_class)) {
return false;
- }
- out->Print("'$Method$': grpc.$MethodHandlerConstructor$(\n",
- "Method", method->name(),
- "MethodHandlerConstructor", method_handler_constructor);
- {
+ }
+ grpc::string response_module_and_class;
+ if (!GetModuleAndMessagePath(method->output_type(), service,
+ &response_module_and_class)) {
+ return false;
+ }
+ out->Print("'$Method$': grpc.$MethodHandlerConstructor$(\n", "Method",
+ method->name(), "MethodHandlerConstructor",
+ method_handler_constructor);
+ {
IndentScope raii_call_first_indent(out);
- IndentScope raii_call_second_indent(out);
- out->Print("servicer.$Method$,\n", "Method", method->name());
- out->Print("request_deserializer=$RequestModuleAndClass$.FromString,\n",
- "RequestModuleAndClass", request_module_and_class);
- out->Print("response_serializer=$ResponseModuleAndClass$.SerializeToString,\n",
- "ResponseModuleAndClass", response_module_and_class);
- }
- out->Print("),\n");
+ IndentScope raii_call_second_indent(out);
+ out->Print("servicer.$Method$,\n", "Method", method->name());
+ out->Print(
+ "request_deserializer=$RequestModuleAndClass$.FromString,\n",
+ "RequestModuleAndClass", request_module_and_class);
+ out->Print(
+ "response_serializer=$ResponseModuleAndClass$.SerializeToString,"
+ "\n",
+ "ResponseModuleAndClass", response_module_and_class);
+ }
+ out->Print("),\n");
}
}
out->Print("}\n");
@@ -593,7 +602,7 @@ bool PrintAddServicerToServer(const grpc::string& package_qualified_service_name
IndentScope raii_call_first_indent(out);
IndentScope raii_call_second_indent(out);
out->Print("'$PackageQualifiedServiceName$', rpc_method_handlers)\n",
- "PackageQualifiedServiceName", package_qualified_service_name);
+ "PackageQualifiedServiceName", package_qualified_service_name);
}
out->Print("server.add_generic_rpc_handlers((generic_handler,))\n");
}
@@ -605,10 +614,12 @@ bool PrintPreamble(const FileDescriptor* file,
out->Print("import $Package$\n", "Package", config.grpc_package_root);
out->Print("from $Package$ import implementations as beta_implementations\n",
"Package", config.beta_package_root);
- out->Print("from $Package$ import interfaces as beta_interfaces\n",
- "Package", config.beta_package_root);
+ out->Print("from $Package$ import interfaces as beta_interfaces\n", "Package",
+ config.beta_package_root);
out->Print("from grpc.framework.common import cardinality\n");
- out->Print("from grpc.framework.interfaces.face import utilities as face_utilities\n");
+ out->Print(
+ "from grpc.framework.interfaces.face import utilities as "
+ "face_utilities\n");
return true;
}
@@ -632,12 +643,14 @@ pair<bool, grpc::string> GetServices(const FileDescriptor* file,
auto service = file->service(i);
auto package_qualified_service_name = package + service->name();
if (!(PrintStub(package_qualified_service_name, service, &out) &&
- PrintServicer(service, &out) &&
- PrintAddServicerToServer(package_qualified_service_name, service, &out) &&
- PrintBetaServicer(service, &out) &&
- PrintBetaStub(service, &out) &&
- PrintBetaServerFactory(package_qualified_service_name, service, &out) &&
- PrintBetaStubFactory(package_qualified_service_name, service, &out))) {
+ PrintServicer(service, &out) &&
+ PrintAddServicerToServer(package_qualified_service_name, service,
+ &out) &&
+ PrintBetaServicer(service, &out) && PrintBetaStub(service, &out) &&
+ PrintBetaServerFactory(package_qualified_service_name, service,
+ &out) &&
+ PrintBetaStubFactory(package_qualified_service_name, service,
+ &out))) {
return make_pair(false, "");
}
}
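The IndentScope reflow above keeps the generator's RAII idiom intact: indentation is pushed in the constructor and popped in the destructor, so emitted Python blocks stay balanced even on early returns. A minimal standalone sketch, using a stand-in printer type rather than the protobuf Printer:

#include <iostream>

// Stand-in for the code printer; only tracks depth for demonstration.
class DemoPrinter {
 public:
  void Indent() { ++depth_; }
  void Outdent() { --depth_; }
  int depth() const { return depth_; }

 private:
  int depth_ = 0;
};

class IndentScope {
 public:
  explicit IndentScope(DemoPrinter *printer) : printer_(printer) {
    printer_->Indent();
  }
  ~IndentScope() { printer_->Outdent(); }

 private:
  DemoPrinter *printer_;
};

int main() {
  DemoPrinter out;
  {
    IndentScope method_indent(&out);  // depth becomes 1 inside the scope
    std::cout << out.depth() << "\n";
  }
  std::cout << out.depth() << "\n";  // back to 0 once the scope closes
}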
diff --git a/src/compiler/python_generator.h b/src/compiler/python_generator.h
index 7ed99eff0b..9bbb83bca6 100644
--- a/src/compiler/python_generator.h
+++ b/src/compiler/python_generator.h
@@ -57,6 +57,7 @@ class PythonGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
const grpc::string& parameter,
grpc::protobuf::compiler::GeneratorContext* context,
grpc::string* error) const;
+
private:
GeneratorConfiguration config_;
};
diff --git a/src/compiler/ruby_generator.cc b/src/compiler/ruby_generator.cc
index 1501c3f3e0..02202568cb 100644
--- a/src/compiler/ruby_generator.cc
+++ b/src/compiler/ruby_generator.cc
@@ -55,17 +55,20 @@ namespace {
// Prints out the method using the ruby gRPC DSL.
void PrintMethod(const MethodDescriptor *method, const grpc::string &package,
Printer *out) {
- grpc::string input_type = RubyTypeOf(method->input_type()->full_name(), package);
+ grpc::string input_type =
+ RubyTypeOf(method->input_type()->full_name(), package);
if (method->client_streaming()) {
input_type = "stream(" + input_type + ")";
}
- grpc::string output_type = RubyTypeOf(method->output_type()->full_name(), package);
+ grpc::string output_type =
+ RubyTypeOf(method->output_type()->full_name(), package);
if (method->server_streaming()) {
output_type = "stream(" + output_type + ")";
}
- std::map<grpc::string, grpc::string> method_vars =
- ListToDict({"mth.name", method->name(), "input.type", input_type,
- "output.type", output_type, });
+ std::map<grpc::string, grpc::string> method_vars = ListToDict({
+ "mth.name", method->name(), "input.type", input_type, "output.type",
+ output_type,
+ });
out->Print(GetRubyComments(method, true).c_str());
out->Print(method_vars, "rpc :$mth.name$, $input.type$, $output.type$\n");
out->Print(GetRubyComments(method, false).c_str());
@@ -79,8 +82,9 @@ void PrintService(const ServiceDescriptor *service, const grpc::string &package,
}
// Begin the service module
- std::map<grpc::string, grpc::string> module_vars =
- ListToDict({"module.name", CapitalizeFirst(service->name()), });
+ std::map<grpc::string, grpc::string> module_vars = ListToDict({
+ "module.name", CapitalizeFirst(service->name()),
+ });
out->Print(module_vars, "module $module.name$\n");
out->Indent();
@@ -130,8 +134,9 @@ grpc::string GetServices(const FileDescriptor *file) {
}
// Write out a file header.
- std::map<grpc::string, grpc::string> header_comment_vars = ListToDict(
- {"file.name", file->name(), "file.package", file->package(), });
+ std::map<grpc::string, grpc::string> header_comment_vars = ListToDict({
+ "file.name", file->name(), "file.package", file->package(),
+ });
out.Print("# Generated by the protocol buffer compiler. DO NOT EDIT!\n");
out.Print(header_comment_vars,
"# Source: $file.name$ for package '$file.package$'\n");
@@ -147,16 +152,18 @@ grpc::string GetServices(const FileDescriptor *file) {
   // Write out require statement to import the separately generated file
// that defines the messages used by the service. This is generated by the
// main ruby plugin.
- std::map<grpc::string, grpc::string> dep_vars =
- ListToDict({"dep.name", MessagesRequireName(file), });
+ std::map<grpc::string, grpc::string> dep_vars = ListToDict({
+ "dep.name", MessagesRequireName(file),
+ });
out.Print(dep_vars, "require '$dep.name$'\n");
// Write out services within the modules
out.Print("\n");
std::vector<grpc::string> modules = Split(file->package(), '.');
for (size_t i = 0; i < modules.size(); ++i) {
- std::map<grpc::string, grpc::string> module_vars =
- ListToDict({"module.name", CapitalizeFirst(modules[i]), });
+ std::map<grpc::string, grpc::string> module_vars = ListToDict({
+ "module.name", CapitalizeFirst(modules[i]),
+ });
out.Print(module_vars, "module $module.name$\n");
out.Indent();
}
diff --git a/src/compiler/ruby_generator_map-inl.h b/src/compiler/ruby_generator_map-inl.h
index 6b87774f21..75ba7fc8f1 100644
--- a/src/compiler/ruby_generator_map-inl.h
+++ b/src/compiler/ruby_generator_map-inl.h
@@ -36,8 +36,8 @@
#include "src/compiler/config.h"
-#include <iostream>
#include <initializer_list>
+#include <iostream>
#include <map>
#include <ostream> // NOLINT
#include <vector>
@@ -53,8 +53,7 @@ namespace grpc_ruby_generator {
inline std::map<grpc::string, grpc::string> ListToDict(
const initializer_list<grpc::string> &values) {
if (values.size() % 2 != 0) {
- std::cerr << "Not every 'key' has a value in `values`."
- << std::endl;
+ std::cerr << "Not every 'key' has a value in `values`." << std::endl;
}
std::map<grpc::string, grpc::string> value_map;
auto value_iter = values.begin();
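Both generators above rely on the same ListToDict contract: an even-length initializer_list of alternating keys and values is folded into a map, and an odd length is a caller bug (asserted in the Python generator, logged in the Ruby one). A standalone C++ approximation of that helper:

#include <cassert>
#include <initializer_list>
#include <map>
#include <string>

std::map<std::string, std::string> ListToDict(
    const std::initializer_list<std::string> &values) {
  assert(values.size() % 2 == 0);  // every key must be followed by its value
  std::map<std::string, std::string> value_map;
  auto value_iter = values.begin();
  for (size_t i = 0; i < values.size() / 2; ++i) {
    std::string key = *value_iter;
    ++value_iter;
    std::string value = *value_iter;
    ++value_iter;
    value_map[key] = value;
  }
  return value_map;
}

// Usage mirroring the Ruby generator (the module name is hypothetical):
//   auto vars = ListToDict({"module.name", "RouteGuide"});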
diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
index 721ba82d8f..9acacbd92d 100644
--- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
+++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
@@ -91,11 +91,13 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
connector *c = arg;
grpc_closure *notify;
gpr_mu_lock(&c->mu);
+ grpc_error *error = GRPC_ERROR_NONE;
if (c->connecting_endpoint == NULL) {
memset(c->result, 0, sizeof(*c->result));
gpr_mu_unlock(&c->mu);
} else if (status != GRPC_SECURITY_OK) {
- gpr_log(GPR_ERROR, "Secure handshake failed with error %d.", status);
+ error = grpc_error_set_int(GRPC_ERROR_CREATE("Secure handshake failed"),
+ GRPC_ERROR_INT_SECURITY_STATUS, status);
memset(c->result, 0, sizeof(*c->result));
c->connecting_endpoint = NULL;
gpr_mu_unlock(&c->mu);
@@ -113,7 +115,7 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
}
notify = c->notify;
c->notify = NULL;
- grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
}
static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
index bd87253ed3..7d5279b9da 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
@@ -36,11 +36,14 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/metadata.h"
+extern int grpc_http_write_state_trace;
+
void grpc_chttp2_plugin_init(void) {
grpc_chttp2_base64_encode_and_huffman_compress =
grpc_chttp2_base64_encode_and_huffman_compress_impl;
grpc_register_tracer("http", &grpc_http_trace);
grpc_register_tracer("flowctl", &grpc_flowctl_trace);
+ grpc_register_tracer("http_write_state", &grpc_http_write_state_trace);
}
void grpc_chttp2_plugin_shutdown(void) {}
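The new "http_write_state" tracer follows the existing tracer pattern: a plain int flag, declared extern here and defined in chttp2_transport.c, gates the state-transition logging added later in this commit. A minimal sketch of that guard, with a hypothetical helper name and fprintf standing in for gpr_log:

#include <cstdio>

int grpc_http_write_state_trace = 0;  // set nonzero when the tracer is enabled

void trace_write_state(const char *from, const char *to, const char *reason) {
  if (grpc_http_write_state_trace) {
    std::fprintf(stderr, "W: %s -> %s because %s\n", from, to, reason);
  }
}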
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 38e782b9b4..be8a8f8498 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -48,6 +48,7 @@
#include "src/core/ext/transport/chttp2/transport/status_conversion.h"
#include "src/core/ext/transport/chttp2/transport/timeout_encoding.h"
#include "src/core/lib/http/parser.h"
+#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/static_metadata.h"
@@ -60,9 +61,9 @@
#define DEFAULT_MAX_HEADER_LIST_SIZE (16 * 1024)
#define MAX_CLIENT_STREAM_ID 0x7fffffffu
-
int grpc_http_trace = 0;
int grpc_flowctl_trace = 0;
+int grpc_http_write_state_trace = 0;
#define TRANSPORT_FROM_WRITING(tw) \
((grpc_chttp2_transport *)((char *)(tw)-offsetof(grpc_chttp2_transport, \
@@ -88,10 +89,16 @@ static const grpc_transport_vtable vtable;
static void writing_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
static void reading_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
static void parsing_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
+static void initiate_writing(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+
+static void start_writing(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
+static void end_waiting_for_write(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, grpc_error *error);
/** Set a transport level setting, and push it to our peer */
-static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id,
- uint32_t value);
+static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_setting_id id, uint32_t value);
/** Start disconnection chain */
static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -137,7 +144,7 @@ static void check_read_ops(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global);
static void incoming_byte_stream_update_flow_control(
- grpc_chttp2_transport_global *transport_global,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, size_t max_size_hint,
size_t have_already);
static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
@@ -201,6 +208,7 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
gpr_free(t);
}
+/*#define REFCOUNTING_DEBUG 1*/
#ifdef REFCOUNTING_DEBUG
#define REF_TRANSPORT(t, r) ref_transport(t, r, __FILE__, __LINE__)
#define UNREF_TRANSPORT(cl, t, r) unref_transport(cl, t, r, __FILE__, __LINE__)
@@ -231,7 +239,7 @@ static void ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const grpc_channel_args *channel_args,
- grpc_endpoint *ep, uint8_t is_client) {
+ grpc_endpoint *ep, bool is_client) {
size_t i;
int j;
@@ -273,6 +281,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_closure_init(&t->writing_action, writing_action, t);
grpc_closure_init(&t->reading_action, reading_action, t);
grpc_closure_init(&t->parsing_action, parsing_action, t);
+ grpc_closure_init(&t->initiate_writing, initiate_writing, t);
gpr_slice_buffer_init(&t->parsing.qbuf);
grpc_chttp2_goaway_parser_init(&t->parsing.goaway_parser);
@@ -286,6 +295,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
gpr_slice_buffer_add(
&t->global.qbuf,
gpr_slice_from_copied_string(GRPC_CHTTP2_CLIENT_CONNECT_STRING));
+ grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "initial_write");
}
/* 8 is a random stab in the dark as to a good initial size: it's small enough
that it shouldn't waste memory for infrequently used connections, yet
@@ -311,11 +321,12 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
/* configure http2 the way we like it */
if (is_client) {
- push_setting(t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
- push_setting(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
+ push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
+ push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
}
- push_setting(t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, DEFAULT_WINDOW);
- push_setting(t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
+ push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
+ DEFAULT_WINDOW);
+ push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
DEFAULT_MAX_HEADER_LIST_SIZE);
if (channel_args) {
@@ -329,7 +340,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
gpr_log(GPR_ERROR, "%s: must be an integer",
GRPC_ARG_MAX_CONCURRENT_STREAMS);
} else {
- push_setting(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
+ push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
(uint32_t)channel_args->args[i].value.integer);
}
} else if (0 == strcmp(channel_args->args[i].key,
@@ -368,7 +379,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
gpr_log(GPR_ERROR, "%s: must be non-negative",
GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER);
} else {
- push_setting(t, GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE,
+ push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE,
(uint32_t)channel_args->args[i].value.integer);
}
} else if (0 == strcmp(channel_args->args[i].key,
@@ -393,7 +404,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
gpr_log(GPR_ERROR, "%s: must be non-negative",
GRPC_ARG_MAX_METADATA_SIZE);
} else {
- push_setting(t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
+ push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
(uint32_t)channel_args->args[i].value.integer);
}
}
@@ -444,6 +455,9 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_error *error) {
if (!t->closed) {
+ if (grpc_http_write_state_trace) {
+ gpr_log(GPR_DEBUG, "W:%p close transport", t);
+ }
t->closed = 1;
connectivity_state_set(exec_ctx, &t->global, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "close_transport");
@@ -513,6 +527,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
&s->global.received_trailing_metadata);
grpc_chttp2_data_parser_init(&s->parsing.data_parser);
gpr_slice_buffer_init(&s->writing.flow_controlled_buffer);
+ s->global.deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
REF_TRANSPORT(t, "stream");
@@ -589,7 +604,8 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_metadata_buffer_destroy(
&s->global.received_trailing_metadata);
gpr_slice_buffer_destroy(&s->writing.flow_controlled_buffer);
- GRPC_ERROR_UNREF(s->global.removal_error);
+ GRPC_ERROR_UNREF(s->global.read_closed_error);
+ GRPC_ERROR_UNREF(s->global.write_closed_error);
UNREF_TRANSPORT(exec_ctx, t, "stream");
@@ -633,6 +649,36 @@ grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
* LOCK MANAGEMENT
*/
+static const char *write_state_name(grpc_chttp2_write_state state) {
+ switch (state) {
+ case GRPC_CHTTP2_WRITING_INACTIVE:
+ return "INACTIVE";
+ case GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER:
+ return "REQUESTED[p=0]";
+ case GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER:
+ return "REQUESTED[p=1]";
+ case GRPC_CHTTP2_WRITE_SCHEDULED:
+ return "SCHEDULED";
+ case GRPC_CHTTP2_WRITING:
+ return "WRITING";
+ case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER:
+ return "WRITING[p=1]";
+ case GRPC_CHTTP2_WRITING_STALE_NO_POLLER:
+ return "WRITING[p=0]";
+ }
+ GPR_UNREACHABLE_CODE(return "UNKNOWN");
+}
+
+static void set_write_state(grpc_chttp2_transport *t,
+ grpc_chttp2_write_state state, const char *reason) {
+ if (grpc_http_write_state_trace) {
+ gpr_log(GPR_DEBUG, "W:%p %s -> %s because %s", t,
+ write_state_name(t->executor.write_state), write_state_name(state),
+ reason);
+ }
+ t->executor.write_state = state;
+}
+
static void finish_global_actions(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
grpc_chttp2_executor_action_header *hdr;
@@ -641,13 +687,6 @@ static void finish_global_actions(grpc_exec_ctx *exec_ctx,
GPR_TIMER_BEGIN("finish_global_actions", 0);
for (;;) {
- if (!t->executor.writing_active && !t->closed &&
- grpc_chttp2_unlocking_check_writes(exec_ctx, &t->global, &t->writing)) {
- t->executor.writing_active = 1;
- REF_TRANSPORT(t, "writing");
- prevent_endpoint_shutdown(t);
- grpc_exec_ctx_sched(exec_ctx, &t->writing_action, GRPC_ERROR_NONE, NULL);
- }
check_read_ops(exec_ctx, &t->global);
gpr_mu_lock(&t->executor.mu);
@@ -668,8 +707,28 @@ static void finish_global_actions(grpc_exec_ctx *exec_ctx,
continue;
} else {
t->executor.global_active = false;
+ switch (t->executor.write_state) {
+ case GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER:
+ set_write_state(t, GRPC_CHTTP2_WRITE_SCHEDULED, "unlocking");
+ REF_TRANSPORT(t, "initiate_writing");
+ gpr_mu_unlock(&t->executor.mu);
+ grpc_exec_ctx_sched(
+ exec_ctx, &t->initiate_writing, GRPC_ERROR_NONE,
+ t->ep != NULL ? grpc_endpoint_get_workqueue(t->ep) : NULL);
+ break;
+ case GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER:
+ start_writing(exec_ctx, t);
+ gpr_mu_unlock(&t->executor.mu);
+ break;
+ case GRPC_CHTTP2_WRITING_INACTIVE:
+ case GRPC_CHTTP2_WRITING:
+ case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER:
+ case GRPC_CHTTP2_WRITING_STALE_NO_POLLER:
+ case GRPC_CHTTP2_WRITE_SCHEDULED:
+ gpr_mu_unlock(&t->executor.mu);
+ break;
+ }
}
- gpr_mu_unlock(&t->executor.mu);
break;
}
@@ -740,16 +799,118 @@ void grpc_chttp2_run_with_global_lock(grpc_exec_ctx *exec_ctx,
* OUTPUT PROCESSING
*/
-void grpc_chttp2_become_writable(grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
+void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
+ bool covered_by_poller, const char *reason) {
+ /* Perform state checks, and transition to a scheduled state if appropriate.
+ Each time we finish the global lock execution, we check if we need to
+ write. If we do:
+ - (if there is a poller surrounding the write) schedule
+ initiate_writing, which locks and calls initiate_writing_locked to...
+ - call start_writing, which verifies (under the global lock) that there
+ are things that need to be written by calling
+ grpc_chttp2_unlocking_check_writes, and if so schedules writing_action
+ against the current exec_ctx, to be executed OUTSIDE of the global lock
+ - eventually writing_action results in grpc_chttp2_terminate_writing being
+ called, which re-takes the global lock, updates state, checks if we need
+ to do *another* write immediately, and if so loops back to
+ start_writing.
+
+ Current problems:
+ - too much lock entry/exiting
+ - the writing thread can become stuck indefinitely (punt through the
+ workqueue periodically to fix) */
+
+ grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
+ switch (t->executor.write_state) {
+ case GRPC_CHTTP2_WRITING_INACTIVE:
+ set_write_state(t, covered_by_poller
+ ? GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER
+ : GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER,
+ reason);
+ break;
+ case GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER:
+ /* nothing to do: write already requested */
+ break;
+ case GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER:
+ if (covered_by_poller) {
+ /* upgrade to note poller is available to cover the write */
+ set_write_state(t, GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER, reason);
+ }
+ break;
+ case GRPC_CHTTP2_WRITE_SCHEDULED:
+ /* nothing to do: write already scheduled */
+ break;
+ case GRPC_CHTTP2_WRITING:
+ set_write_state(t,
+ covered_by_poller ? GRPC_CHTTP2_WRITING_STALE_WITH_POLLER
+ : GRPC_CHTTP2_WRITING_STALE_NO_POLLER,
+ reason);
+ break;
+ case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER:
+ /* nothing to do: write already requested */
+ break;
+ case GRPC_CHTTP2_WRITING_STALE_NO_POLLER:
+ if (covered_by_poller) {
+ /* upgrade to note poller is available to cover the write */
+ set_write_state(t, GRPC_CHTTP2_WRITING_STALE_WITH_POLLER, reason);
+ }
+ break;
+ }
+}
+
+static void start_writing(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
+ GPR_ASSERT(t->executor.write_state == GRPC_CHTTP2_WRITE_SCHEDULED ||
+ t->executor.write_state == GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER);
+ if (!t->closed &&
+ grpc_chttp2_unlocking_check_writes(exec_ctx, &t->global, &t->writing)) {
+ set_write_state(t, GRPC_CHTTP2_WRITING, "start_writing");
+ REF_TRANSPORT(t, "writing");
+ prevent_endpoint_shutdown(t);
+ grpc_exec_ctx_sched(exec_ctx, &t->writing_action, GRPC_ERROR_NONE, NULL);
+ } else {
+ if (t->closed) {
+ set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE,
+ "start_writing:transport_closed");
+ } else {
+ set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE,
+ "start_writing:nothing_to_write");
+ }
+ end_waiting_for_write(exec_ctx, t, GRPC_ERROR_CREATE("Nothing to write"));
+ if (t->ep && !t->endpoint_reading) {
+ destroy_endpoint(exec_ctx, t);
+ }
+ }
+}
+
+static void initiate_writing_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s_unused,
+ void *arg_ignored) {
+ start_writing(exec_ctx, t);
+ UNREF_TRANSPORT(exec_ctx, t, "initiate_writing");
+}
+
+static void initiate_writing(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_run_with_global_lock(exec_ctx, arg, NULL, initiate_writing_locked,
+ NULL, 0);
+}
+
+void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global,
+ bool covered_by_poller, const char *reason) {
if (!TRANSPORT_FROM_GLOBAL(transport_global)->closed &&
grpc_chttp2_list_add_writable_stream(transport_global, stream_global)) {
GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
+ grpc_chttp2_initiate_write(exec_ctx, transport_global, covered_by_poller,
+ reason);
}
}
-static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id,
- uint32_t value) {
+static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_setting_id id, uint32_t value) {
const grpc_chttp2_setting_parameters *sp =
&grpc_chttp2_settings_parameters[id];
uint32_t use_value = GPR_CLAMP(value, sp->min_value, sp->max_value);
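Illustrative sketch (not part of the commit): the block comment in grpc_chttp2_initiate_write above describes the new write scheduler, and its switch reduces to a small transition function. It is reproduced here as standalone C++ with the GRPC_CHTTP2_ prefixes dropped: an idle transport moves to a REQUESTED state, an in-flight write is marked stale, and covered_by_poller can only upgrade a NO_POLLER state to its WITH_POLLER counterpart.

enum WriteState {
  WRITING_INACTIVE,
  WRITE_REQUESTED_NO_POLLER,
  WRITE_REQUESTED_WITH_POLLER,
  WRITE_SCHEDULED,
  WRITING,
  WRITING_STALE_WITH_POLLER,
  WRITING_STALE_NO_POLLER,
};

// Next state when a write is initiated in `state` with the given poller
// coverage; mirrors the switch in the hunk above.
WriteState OnInitiateWrite(WriteState state, bool covered_by_poller) {
  switch (state) {
    case WRITING_INACTIVE:
      return covered_by_poller ? WRITE_REQUESTED_WITH_POLLER
                               : WRITE_REQUESTED_NO_POLLER;
    case WRITE_REQUESTED_NO_POLLER:
      return covered_by_poller ? WRITE_REQUESTED_WITH_POLLER : state;
    case WRITING:
      return covered_by_poller ? WRITING_STALE_WITH_POLLER
                               : WRITING_STALE_NO_POLLER;
    case WRITING_STALE_NO_POLLER:
      return covered_by_poller ? WRITING_STALE_WITH_POLLER : state;
    case WRITE_REQUESTED_WITH_POLLER:
    case WRITE_SCHEDULED:
    case WRITING_STALE_WITH_POLLER:
      return state;  // already requested/scheduled at least this strongly
  }
  return state;  // unreachable: all enumerators handled above
}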
@@ -760,9 +921,22 @@ static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id,
if (use_value != t->global.settings[GRPC_LOCAL_SETTINGS][id]) {
t->global.settings[GRPC_LOCAL_SETTINGS][id] = use_value;
t->global.dirtied_local_settings = 1;
+ grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "push_setting");
}
}
+static void end_waiting_for_write(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, grpc_error *error) {
+ grpc_chttp2_stream_global *stream_global;
+ while (grpc_chttp2_list_pop_closed_waiting_for_writing(&t->global,
+ &stream_global)) {
+ fail_pending_writes(exec_ctx, &t->global, stream_global,
+ GRPC_ERROR_REF(error));
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "finish_writes");
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
static void terminate_writing_with_lock(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s_ignored,
@@ -777,24 +951,32 @@ static void terminate_writing_with_lock(grpc_exec_ctx *exec_ctx,
grpc_chttp2_cleanup_writing(exec_ctx, &t->global, &t->writing);
- grpc_chttp2_stream_global *stream_global;
- while (grpc_chttp2_list_pop_closed_waiting_for_writing(&t->global,
- &stream_global)) {
- fail_pending_writes(exec_ctx, &t->global, stream_global,
- GRPC_ERROR_REF(error));
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "finish_writes");
+ end_waiting_for_write(exec_ctx, t, error);
+
+ switch (t->executor.write_state) {
+ case GRPC_CHTTP2_WRITING_INACTIVE:
+ case GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER:
+ case GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER:
+ case GRPC_CHTTP2_WRITE_SCHEDULED:
+ GPR_UNREACHABLE_CODE(break);
+ case GRPC_CHTTP2_WRITING:
+ set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE, "terminate_writing");
+ break;
+ case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER:
+ set_write_state(t, GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER,
+ "terminate_writing");
+ break;
+ case GRPC_CHTTP2_WRITING_STALE_NO_POLLER:
+ set_write_state(t, GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER,
+ "terminate_writing");
+ break;
}
- /* leave the writing flag up on shutdown to prevent further writes in
- unlock()
- from starting */
- t->executor.writing_active = 0;
if (t->ep && !t->endpoint_reading) {
destroy_endpoint(exec_ctx, t);
}
UNREF_TRANSPORT(exec_ctx, t, "writing");
- GRPC_ERROR_UNREF(error);
}
void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
@@ -877,7 +1059,8 @@ static void maybe_start_some_streams(
stream_global->id, STREAM_FROM_GLOBAL(stream_global));
stream_global->in_stream_map = true;
transport_global->concurrent_stream_count++;
- grpc_chttp2_become_writable(transport_global, stream_global);
+ grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global, true,
+ "new_stream");
}
/* cancel out streams that will never be started */
while (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID &&
@@ -988,6 +1171,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
const size_t metadata_peer_limit =
transport_global->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
+ if (transport_global->is_client) {
+ stream_global->deadline =
+ gpr_time_min(stream_global->deadline,
+ stream_global->send_initial_metadata->deadline);
+ }
if (metadata_size > metadata_peer_limit) {
cancel_from_api(
exec_ctx, transport_global, stream_global,
@@ -1012,9 +1200,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
maybe_start_some_streams(exec_ctx, transport_global);
} else {
GPR_ASSERT(stream_global->id != 0);
- grpc_chttp2_become_writable(transport_global, stream_global);
+ grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
+ true, "op.send_initial_metadata");
}
} else {
+ stream_global->send_trailing_metadata = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_initial_metadata_finished,
@@ -1036,7 +1226,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
} else {
stream_global->send_message = op->send_message;
if (stream_global->id != 0) {
- grpc_chttp2_become_writable(transport_global, stream_global);
+ grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
+ true, "op.send_message");
}
}
}
@@ -1069,6 +1260,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
}
if (stream_global->write_closed) {
+ stream_global->send_trailing_metadata = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_trailing_metadata_finished,
@@ -1079,7 +1271,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
} else if (stream_global->id != 0) {
/* TODO(ctiller): check if there's flow control for any outstanding
bytes before going writable */
- grpc_chttp2_become_writable(transport_global, stream_global);
+ grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
+ true, "op.send_trailing_metadata");
}
}
}
@@ -1100,8 +1293,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
(stream_global->incoming_frames.head == NULL ||
stream_global->incoming_frames.head->is_tail)) {
incoming_byte_stream_update_flow_control(
- transport_global, stream_global, transport_global->stream_lookahead,
- 0);
+ exec_ctx, transport_global, stream_global,
+ transport_global->stream_lookahead, 0);
}
grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
}
@@ -1129,7 +1322,8 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
sizeof(*op));
}
-static void send_ping_locked(grpc_chttp2_transport *t, grpc_closure *on_recv) {
+static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_closure *on_recv) {
grpc_chttp2_outstanding_ping *p = gpr_malloc(sizeof(*p));
p->next = &t->global.pings;
p->prev = p->next->prev;
@@ -1144,6 +1338,7 @@ static void send_ping_locked(grpc_chttp2_transport *t, grpc_closure *on_recv) {
p->id[7] = (uint8_t)(t->global.ping_counter & 0xff);
p->on_recv = on_recv;
gpr_slice_buffer_add(&t->global.qbuf, grpc_chttp2_ping_create(0, p->id));
+ grpc_chttp2_initiate_write(exec_ctx, &t->global, true, "send_ping");
}
static void ack_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -1203,6 +1398,7 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
close_transport = grpc_chttp2_has_streams(t)
? GRPC_ERROR_NONE
: GRPC_ERROR_CREATE("GOAWAY sent");
+ grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "goaway_sent");
}
if (op->set_accept_stream) {
@@ -1220,7 +1416,7 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
}
if (op->send_ping) {
- send_ping_locked(t, op->send_ping);
+ send_ping_locked(exec_ctx, t, op->send_ping);
}
if (close_transport != GRPC_ERROR_NONE) {
@@ -1366,7 +1562,7 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ERROR_UNREF(error);
}
-static void status_codes_from_error(grpc_error *error,
+static void status_codes_from_error(grpc_error *error, gpr_timespec deadline,
grpc_chttp2_error_code *http2_error,
grpc_status_code *grpc_status) {
intptr_t ip_http;
@@ -1386,8 +1582,8 @@ static void status_codes_from_error(grpc_error *error,
if (have_grpc) {
*grpc_status = (grpc_status_code)ip_grpc;
} else if (have_http) {
- *grpc_status =
- grpc_chttp2_http2_error_to_grpc_status((grpc_chttp2_error_code)ip_http);
+ *grpc_status = grpc_chttp2_http2_error_to_grpc_status(
+ (grpc_chttp2_error_code)ip_http, deadline);
} else {
*grpc_status = GRPC_STATUS_INTERNAL;
}
@@ -1400,13 +1596,16 @@ static void cancel_from_api(grpc_exec_ctx *exec_ctx,
if (!stream_global->read_closed || !stream_global->write_closed) {
grpc_status_code grpc_status;
grpc_chttp2_error_code http_error;
- status_codes_from_error(due_to_error, &http_error, &grpc_status);
+ status_codes_from_error(due_to_error, stream_global->deadline, &http_error,
+ &grpc_status);
if (stream_global->id != 0) {
gpr_slice_buffer_add(
&transport_global->qbuf,
grpc_chttp2_rst_stream_create(stream_global->id, (uint32_t)http_error,
&stream_global->stats.outgoing));
+ grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
+ "rst_stream");
}
const char *msg =
@@ -1466,10 +1665,39 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
}
}
+static void add_error(grpc_error *error, grpc_error **refs, size_t *nrefs) {
+ if (error == GRPC_ERROR_NONE) return;
+ for (size_t i = 0; i < *nrefs; i++) {
+ if (error == refs[i]) {
+ return;
+ }
+ }
+ refs[*nrefs] = error;
+ ++*nrefs;
+}
+
+static grpc_error *removal_error(grpc_error *extra_error,
+ grpc_chttp2_stream_global *stream_global) {
+ grpc_error *refs[3];
+ size_t nrefs = 0;
+ add_error(stream_global->read_closed_error, refs, &nrefs);
+ add_error(stream_global->write_closed_error, refs, &nrefs);
+ add_error(extra_error, refs, &nrefs);
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (nrefs > 0) {
+ error = GRPC_ERROR_CREATE_REFERENCING("Failed due to stream removal", refs,
+ nrefs);
+ }
+ GRPC_ERROR_UNREF(extra_error);
+ return error;
+}
+
static void fail_pending_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global,
grpc_error *error) {
+ error = removal_error(error, stream_global);
+ stream_global->send_message = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_initial_metadata_finished, GRPC_ERROR_REF(error));
@@ -1492,14 +1720,17 @@ void grpc_chttp2_mark_stream_closed(
}
grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
if (close_reads && !stream_global->read_closed) {
+ stream_global->read_closed_error = GRPC_ERROR_REF(error);
stream_global->read_closed = true;
stream_global->published_initial_metadata = true;
stream_global->published_trailing_metadata = true;
decrement_active_streams_locked(exec_ctx, transport_global, stream_global);
}
if (close_writes && !stream_global->write_closed) {
+ stream_global->write_closed_error = GRPC_ERROR_REF(error);
stream_global->write_closed = true;
- if (TRANSPORT_FROM_GLOBAL(transport_global)->executor.writing_active) {
+ if (TRANSPORT_FROM_GLOBAL(transport_global)->executor.write_state !=
+ GRPC_CHTTP2_WRITING_INACTIVE) {
GRPC_CHTTP2_STREAM_REF(stream_global, "finish_writes");
grpc_chttp2_list_add_closed_waiting_for_writing(transport_global,
stream_global);
@@ -1509,7 +1740,6 @@ void grpc_chttp2_mark_stream_closed(
}
}
if (stream_global->read_closed && stream_global->write_closed) {
- stream_global->removal_error = GRPC_ERROR_REF(error);
if (stream_global->id != 0 &&
TRANSPORT_FROM_GLOBAL(transport_global)->executor.parsing_active) {
grpc_chttp2_list_add_closed_waiting_for_parsing(transport_global,
@@ -1517,7 +1747,8 @@ void grpc_chttp2_mark_stream_closed(
} else {
if (stream_global->id != 0) {
remove_stream(exec_ctx, TRANSPORT_FROM_GLOBAL(transport_global),
- stream_global->id, GRPC_ERROR_REF(error));
+ stream_global->id,
+ removal_error(GRPC_ERROR_REF(error), stream_global));
}
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2");
}
@@ -1536,7 +1767,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
uint32_t len = 0;
grpc_status_code grpc_status;
grpc_chttp2_error_code http_error;
- status_codes_from_error(error, &http_error, &grpc_status);
+ status_codes_from_error(error, stream_global->deadline, &http_error,
+ &grpc_status);
GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100);
@@ -1641,6 +1873,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global, 1,
1, error);
+ grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
+ "close_from_api");
}
typedef struct {
@@ -1670,8 +1904,14 @@ static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
/** update window from a settings change */
+typedef struct {
+ grpc_chttp2_transport *t;
+ grpc_exec_ctx *exec_ctx;
+} update_global_window_args;
+
static void update_global_window(void *args, uint32_t id, void *stream) {
- grpc_chttp2_transport *t = args;
+ update_global_window_args *a = args;
+ grpc_chttp2_transport *t = a->t;
grpc_chttp2_stream *s = stream;
grpc_chttp2_transport_global *transport_global = &t->global;
grpc_chttp2_stream_global *stream_global = &s->global;
@@ -1685,7 +1925,8 @@ static void update_global_window(void *args, uint32_t id, void *stream) {
is_zero = stream_global->outgoing_window <= 0;
if (was_zero && !is_zero) {
- grpc_chttp2_become_writable(transport_global, stream_global);
+ grpc_chttp2_become_writable(a->exec_ctx, transport_global, stream_global,
+ true, "update_global_window");
}
}
@@ -1794,14 +2035,19 @@ static void post_parse_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_transport_global *transport_global = &t->global;
grpc_chttp2_transport_parsing *transport_parsing = &t->parsing;
/* copy parsing qbuf to global qbuf */
- gpr_slice_buffer_move_into(&t->parsing.qbuf, &t->global.qbuf);
+ if (t->parsing.qbuf.count > 0) {
+ gpr_slice_buffer_move_into(&t->parsing.qbuf, &t->global.qbuf);
+ grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
+ "parsing_qbuf");
+ }
/* merge stream lists */
grpc_chttp2_stream_map_move_into(&t->new_stream_map, &t->parsing_stream_map);
transport_global->concurrent_stream_count =
(uint32_t)grpc_chttp2_stream_map_size(&t->parsing_stream_map);
if (transport_parsing->initial_window_update != 0) {
+ update_global_window_args args = {t, exec_ctx};
grpc_chttp2_stream_map_for_each(&t->parsing_stream_map,
- update_global_window, t);
+ update_global_window, &args);
transport_parsing->initial_window_update = 0;
}
/* handle higher level things */
@@ -1824,7 +2070,7 @@ static void post_parse_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GPR_ASSERT(stream_global->write_closed);
GPR_ASSERT(stream_global->read_closed);
remove_stream(exec_ctx, t, stream_global->id,
- GRPC_ERROR_REF(stream_global->removal_error));
+ removal_error(GRPC_ERROR_NONE, stream_global));
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2");
}
@@ -1847,11 +2093,12 @@ static void post_reading_action_locked(grpc_exec_ctx *exec_ctx,
}
drop_connection(exec_ctx, t, GRPC_ERROR_REF(error));
t->endpoint_reading = 0;
- if (!t->executor.writing_active && t->ep) {
- grpc_endpoint_destroy(exec_ctx, t->ep);
- t->ep = NULL;
- /* safe as we still have a ref for read */
- UNREF_TRANSPORT(exec_ctx, t, "disconnect");
+ if (grpc_http_write_state_trace) {
+ gpr_log(GPR_DEBUG, "R:%p -> 0 ws=%s", t,
+ write_state_name(t->executor.write_state));
+ }
+ if (t->executor.write_state == GRPC_CHTTP2_WRITING_INACTIVE && t->ep) {
+ destroy_endpoint(exec_ctx, t);
}
} else if (!t->closed) {
keep_reading = true;
@@ -1935,7 +2182,7 @@ static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx,
}
static void incoming_byte_stream_update_flow_control(
- grpc_chttp2_transport_global *transport_global,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, size_t max_size_hint,
size_t have_already) {
uint32_t max_recv_bytes;
@@ -1970,7 +2217,8 @@ static void incoming_byte_stream_update_flow_control(
add_max_recv_bytes);
grpc_chttp2_list_add_unannounced_incoming_window_available(transport_global,
stream_global);
- grpc_chttp2_become_writable(transport_global, stream_global);
+ grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
+ false, "read_incoming_stream");
}
}
@@ -1992,8 +2240,9 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_global *stream_global = &bs->stream->global;
if (bs->is_tail) {
- incoming_byte_stream_update_flow_control(
- transport_global, stream_global, arg->max_size_hint, bs->slices.length);
+ incoming_byte_stream_update_flow_control(exec_ctx, transport_global,
+ stream_global, arg->max_size_hint,
+ bs->slices.length);
}
if (bs->slices.count > 0) {
*arg->slice = gpr_slice_buffer_take_first(&bs->slices);
@@ -2177,7 +2426,7 @@ static char *format_flowctl_context_var(const char *context, const char *var,
if (context == NULL) {
*scope = NULL;
gpr_asprintf(&buf, "%s(%" PRId64 ")", var, val);
- result = gpr_leftpad(buf, ' ', 40);
+ result = gpr_leftpad(buf, ' ', 60);
gpr_free(buf);
return result;
}
@@ -2190,7 +2439,7 @@ static char *format_flowctl_context_var(const char *context, const char *var,
gpr_free(tmp);
}
gpr_asprintf(&buf, "%s.%s(%" PRId64 ")", underscore_pos + 1, var, val);
- result = gpr_leftpad(buf, ' ', 40);
+ result = gpr_leftpad(buf, ' ', 60);
gpr_free(buf);
return result;
}
@@ -2223,7 +2472,7 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
tmp_phase = gpr_leftpad(phase, ' ', 8);
tmp_scope1 = gpr_leftpad(scope1, ' ', 11);
- gpr_asprintf(&prefix, "FLOW %s: %s %s ", phase, clisvr, scope1);
+ gpr_asprintf(&prefix, "FLOW %s: %s %s ", tmp_phase, clisvr, scope1);
gpr_free(tmp_phase);
gpr_free(tmp_scope1);
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index b5180c6fc8..e1dcf5262a 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -305,6 +305,22 @@ typedef struct grpc_chttp2_executor_action_header {
void *arg;
} grpc_chttp2_executor_action_header;
+typedef enum {
+ /** no writing activity */
+ GRPC_CHTTP2_WRITING_INACTIVE,
+ /** write has been requested, but not scheduled yet */
+ GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER,
+ GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER,
+ /** write has been requested and scheduled against the workqueue */
+ GRPC_CHTTP2_WRITE_SCHEDULED,
+ /** write has been initiated after being reaped from the workqueue */
+ GRPC_CHTTP2_WRITING,
+ /** write has been initiated, AND another write needs to be started once it's
+ done */
+ GRPC_CHTTP2_WRITING_STALE_WITH_POLLER,
+ GRPC_CHTTP2_WRITING_STALE_NO_POLLER,
+} grpc_chttp2_write_state;
+
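For orientation, a plausible shape of the write_state_name() helper referenced by the trace logging earlier in this patch; this sketch is an assumption, not the committed implementation.

/* Sketch (assumption): map each write state to a short name for tracing. */
static const char *example_write_state_name(grpc_chttp2_write_state st) {
  switch (st) {
    case GRPC_CHTTP2_WRITING_INACTIVE:            return "INACTIVE";
    case GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER: return "REQUESTED[p=1]";
    case GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER:   return "REQUESTED[p=0]";
    case GRPC_CHTTP2_WRITE_SCHEDULED:             return "SCHEDULED";
    case GRPC_CHTTP2_WRITING:                     return "WRITING";
    case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER:   return "WRITING[p=1]";
    case GRPC_CHTTP2_WRITING_STALE_NO_POLLER:     return "WRITING[p=0]";
  }
  return "UNKNOWN";
}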
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
@@ -319,10 +335,10 @@ struct grpc_chttp2_transport {
/** is a thread currently in the global lock */
bool global_active;
- /** is a thread currently writing */
- bool writing_active;
/** is a thread currently parsing */
bool parsing_active;
+ /** write execution state of the transport */
+ grpc_chttp2_write_state write_state;
grpc_chttp2_executor_action_header *pending_actions_head;
grpc_chttp2_executor_action_header *pending_actions_tail;
@@ -342,7 +358,8 @@ struct grpc_chttp2_transport {
/** global state for reading/writing */
grpc_chttp2_transport_global global;
/** state only accessible by the chain of execution that
- set writing_active=1 */
+ set write_state >= GRPC_CHTTP2_WRITING, and only by the writing closure
+ chain. */
grpc_chttp2_transport_writing writing;
/** state only accessible by the chain of execution that
set parsing_active=1 */
@@ -363,6 +380,8 @@ struct grpc_chttp2_transport {
grpc_closure reading_action;
/** closure to actually do parsing */
grpc_closure parsing_action;
+ /** closure to initiate writing */
+ grpc_closure initiate_writing;
/** incoming read bytes */
gpr_slice_buffer read_buffer;
@@ -436,8 +455,10 @@ typedef struct {
bool seen_error;
bool exceeded_metadata_size;
- /** the error that resulted in this stream being removed */
- grpc_error *removal_error;
+ /** the error that resulted in this stream being read-closed */
+ grpc_error *read_closed_error;
+ /** the error that resulted in this stream being write-closed */
+ grpc_error *write_closed_error;
bool published_initial_metadata;
bool published_trailing_metadata;
@@ -447,6 +468,8 @@ typedef struct {
grpc_chttp2_incoming_metadata_buffer received_trailing_metadata;
grpc_chttp2_incoming_frame_queue incoming_frames;
+
+ gpr_timespec deadline;
} grpc_chttp2_stream_global;
typedef struct {
@@ -512,15 +535,20 @@ struct grpc_chttp2_stream {
};
/** Transport writing call flow:
- chttp2_transport.c calls grpc_chttp2_unlocking_check_writes to see if writes
- are required;
- if they are, chttp2_transport.c calls grpc_chttp2_perform_writes to do the
- writes.
- Once writes have been completed (meaning another write could potentially be
- started),
- grpc_chttp2_terminate_writing is called. This will call
- grpc_chttp2_cleanup_writing, at which
- point the write phase is complete. */
+ grpc_chttp2_initiate_write() is called anywhere that we know bytes need to
+ go out on the wire.
+ If no other write has been started, a task is enqueued onto our workqueue.
+ When that task executes, it obtains the global lock, and gathers the data
+ to write.
+ The global lock is dropped and we do the syscall to write.
+ After writing, a follow-up check is made to see if another round of writing
+ should be performed.
+
+ The actual call chain is documented in the implementation of this function.
+ */
+void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
+ bool covered_by_poller, const char *reason);
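A sketch of a typical call site, mirroring the uses added elsewhere in this patch (e.g. "parsing_qbuf", "close_from_api"); the function and reason string here are illustrative assumptions.

/* Sketch: after queueing bytes for the transport, request a write and say
   whether a poller currently covers this transport. */
static void example_request_write(
    grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global) {
  /* ... frames have been appended to transport_global->qbuf ... */
  grpc_chttp2_initiate_write(exec_ctx, transport_global,
                             false /* covered_by_poller */, "example_reason");
}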
/** Someone is unlocking the transport mutex: check to see if writes
are required, and schedule them if so */
@@ -608,9 +636,8 @@ int grpc_chttp2_list_pop_check_read_ops(
void grpc_chttp2_list_add_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
-void grpc_chttp2_list_flush_writing_stalled_by_transport(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
- bool is_window_available);
+bool grpc_chttp2_list_flush_writing_stalled_by_transport(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing);
void grpc_chttp2_list_add_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
@@ -820,7 +847,9 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
/** add a ref to the stream and add it to the writable list;
ref will be dropped in writing.c */
-void grpc_chttp2_become_writable(grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
+void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global,
+ bool covered_by_poller, const char *reason);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 991d7729af..e1fc0ddee2 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -154,10 +154,8 @@ void grpc_chttp2_publish_reads(
transport_parsing, outgoing_window);
is_zero = transport_global->outgoing_window <= 0;
if (was_zero && !is_zero) {
- while (grpc_chttp2_list_pop_stalled_by_transport(transport_global,
- &stream_global)) {
- grpc_chttp2_become_writable(transport_global, stream_global);
- }
+ grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
+ "new_global_flow_control");
}
if (transport_parsing->incoming_window <
@@ -168,6 +166,8 @@ void grpc_chttp2_publish_reads(
announce_incoming_window, announce_bytes);
GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", transport_parsing,
incoming_window, announce_bytes);
+ grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
+ "global incoming window");
}
/* for each stream that saw an update, fixup global state */
@@ -190,7 +190,8 @@ void grpc_chttp2_publish_reads(
outgoing_window);
is_zero = stream_global->outgoing_window <= 0;
if (was_zero && !is_zero) {
- grpc_chttp2_become_writable(transport_global, stream_global);
+ grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
+ false, "stream.read_flow_control");
}
stream_global->max_recv_bytes -= (uint32_t)GPR_MIN(
@@ -236,9 +237,10 @@ void grpc_chttp2_publish_reads(
GRPC_ERROR_INT_HTTP2_ERROR, &reason);
if (has_reason && reason != GRPC_CHTTP2_NO_ERROR) {
grpc_status_code status_code =
- has_reason ? grpc_chttp2_http2_error_to_grpc_status(
- (grpc_chttp2_error_code)reason)
- : GRPC_STATUS_INTERNAL;
+ has_reason
+ ? grpc_chttp2_http2_error_to_grpc_status(
+ (grpc_chttp2_error_code)reason, stream_global->deadline)
+ : GRPC_STATUS_INTERNAL;
const char *status_details =
grpc_error_string(stream_parsing->forced_close_error);
gpr_slice slice_details = gpr_slice_from_copied_string(status_details);
diff --git a/src/core/ext/transport/chttp2/transport/status_conversion.c b/src/core/ext/transport/chttp2/transport/status_conversion.c
index c42fb9b3a1..5dce2f2d0c 100644
--- a/src/core/ext/transport/chttp2/transport/status_conversion.c
+++ b/src/core/ext/transport/chttp2/transport/status_conversion.c
@@ -39,6 +39,8 @@ int grpc_chttp2_grpc_status_to_http2_error(grpc_status_code status) {
return GRPC_CHTTP2_NO_ERROR;
case GRPC_STATUS_CANCELLED:
return GRPC_CHTTP2_CANCEL;
+ case GRPC_STATUS_DEADLINE_EXCEEDED:
+ return GRPC_CHTTP2_CANCEL;
case GRPC_STATUS_RESOURCE_EXHAUSTED:
return GRPC_CHTTP2_ENHANCE_YOUR_CALM;
case GRPC_STATUS_PERMISSION_DENIED:
@@ -51,13 +53,17 @@ int grpc_chttp2_grpc_status_to_http2_error(grpc_status_code status) {
}
grpc_status_code grpc_chttp2_http2_error_to_grpc_status(
- grpc_chttp2_error_code error) {
+ grpc_chttp2_error_code error, gpr_timespec deadline) {
switch (error) {
case GRPC_CHTTP2_NO_ERROR:
/* should never be received */
return GRPC_STATUS_INTERNAL;
case GRPC_CHTTP2_CANCEL:
- return GRPC_STATUS_CANCELLED;
+ /* http2 cancel translates to STATUS_CANCELLED iff deadline hasn't been
+ * exceeded */
+ return gpr_time_cmp(gpr_now(deadline.clock_type), deadline) >= 0
+ ? GRPC_STATUS_DEADLINE_EXCEEDED
+ : GRPC_STATUS_CANCELLED;
case GRPC_CHTTP2_ENHANCE_YOUR_CALM:
return GRPC_STATUS_RESOURCE_EXHAUSTED;
case GRPC_CHTTP2_INADEQUATE_SECURITY:
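Hypothetical usage sketch (not part of the patch) of the new deadline-aware conversion: an HTTP/2 CANCEL received after the call's deadline now surfaces as DEADLINE_EXCEEDED rather than CANCELLED.

/* Sketch only: map a received CANCEL, taking the call deadline into account. */
static grpc_status_code example_map_cancel(gpr_timespec deadline) {
  return grpc_chttp2_http2_error_to_grpc_status(GRPC_CHTTP2_CANCEL, deadline);
}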
diff --git a/src/core/ext/transport/chttp2/transport/status_conversion.h b/src/core/ext/transport/chttp2/transport/status_conversion.h
index e7285e6fd5..953bc9f1e1 100644
--- a/src/core/ext/transport/chttp2/transport/status_conversion.h
+++ b/src/core/ext/transport/chttp2/transport/status_conversion.h
@@ -41,7 +41,7 @@
grpc_chttp2_error_code grpc_chttp2_grpc_status_to_http2_error(
grpc_status_code status);
grpc_status_code grpc_chttp2_http2_error_to_grpc_status(
- grpc_chttp2_error_code error);
+ grpc_chttp2_error_code error, gpr_timespec deadline);
/* Conversion of HTTP status codes (:status) to grpc status codes */
grpc_status_code grpc_chttp2_http2_status_to_grpc_status(int status);
diff --git a/src/core/ext/transport/chttp2/transport/stream_lists.c b/src/core/ext/transport/chttp2/transport/stream_lists.c
index 8f3ab00e6d..2eb5f5f632 100644
--- a/src/core/ext/transport/chttp2/transport/stream_lists.c
+++ b/src/core/ext/transport/chttp2/transport/stream_lists.c
@@ -329,6 +329,7 @@ void grpc_chttp2_list_add_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
grpc_chttp2_stream *stream = STREAM_FROM_WRITING(stream_writing);
+ gpr_log(GPR_DEBUG, "writing stalled %d", stream->global.id);
if (!stream->included[GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT]) {
GRPC_CHTTP2_STREAM_REF(&stream->global, "chttp2_writing_stalled");
}
@@ -336,27 +337,28 @@ void grpc_chttp2_list_add_writing_stalled_by_transport(
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT);
}
-void grpc_chttp2_list_flush_writing_stalled_by_transport(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
- bool is_window_available) {
+bool grpc_chttp2_list_flush_writing_stalled_by_transport(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream *stream;
+ bool out = false;
grpc_chttp2_transport *transport = TRANSPORT_FROM_WRITING(transport_writing);
while (stream_list_pop(transport, &stream,
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT)) {
- if (is_window_available) {
- grpc_chttp2_become_writable(&transport->global, &stream->global);
- } else {
- grpc_chttp2_list_add_stalled_by_transport(transport_writing,
- &stream->writing);
- }
+ gpr_log(GPR_DEBUG, "move %d from writing stalled to just stalled",
+ stream->global.id);
+ grpc_chttp2_list_add_stalled_by_transport(transport_writing,
+ &stream->writing);
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &stream->global,
"chttp2_writing_stalled");
+ out = true;
}
+ return out;
}
void grpc_chttp2_list_add_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
+ gpr_log(GPR_DEBUG, "stalled %d", stream_writing->id);
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index b19f5f068d..e0d87725e9 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -75,9 +75,13 @@ int grpc_chttp2_unlocking_check_writes(
GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("write", transport_writing, outgoing_window,
transport_global, outgoing_window);
- bool is_window_available = transport_writing->outgoing_window > 0;
- grpc_chttp2_list_flush_writing_stalled_by_transport(
- exec_ctx, transport_writing, is_window_available);
+ if (transport_writing->outgoing_window > 0) {
+ while (grpc_chttp2_list_pop_stalled_by_transport(transport_global,
+ &stream_global)) {
+ grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
+ false, "transport.read_flow_control");
+ }
+ }
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to available window sizes) and add to the output buffer */
@@ -331,6 +335,12 @@ void grpc_chttp2_cleanup_writing(
grpc_chttp2_stream_writing *stream_writing;
grpc_chttp2_stream_global *stream_global;
+ if (grpc_chttp2_list_flush_writing_stalled_by_transport(exec_ctx,
+ transport_writing)) {
+ grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
+ "resume_stalled_stream");
+ }
+
while (grpc_chttp2_list_pop_written_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
if (stream_writing->sent_initial_metadata) {
diff --git a/src/core/lib/iomgr/endpoint.c b/src/core/lib/iomgr/endpoint.c
index 1ab3733d38..f901fcf962 100644
--- a/src/core/lib/iomgr/endpoint.c
+++ b/src/core/lib/iomgr/endpoint.c
@@ -65,3 +65,7 @@ void grpc_endpoint_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
char* grpc_endpoint_get_peer(grpc_endpoint* ep) {
return ep->vtable->get_peer(ep);
}
+
+grpc_workqueue* grpc_endpoint_get_workqueue(grpc_endpoint* ep) {
+ return ep->vtable->get_workqueue(ep);
+}
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index f9808bbda1..894efc0b23 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -51,6 +51,7 @@ struct grpc_endpoint_vtable {
gpr_slice_buffer *slices, grpc_closure *cb);
void (*write)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
gpr_slice_buffer *slices, grpc_closure *cb);
+ grpc_workqueue *(*get_workqueue)(grpc_endpoint *ep);
void (*add_to_pollset)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_pollset *pollset);
void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -69,6 +70,9 @@ void grpc_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
char *grpc_endpoint_get_peer(grpc_endpoint *ep);
+/* Retrieve a reference to the workqueue associated with this endpoint */
+grpc_workqueue *grpc_endpoint_get_workqueue(grpc_endpoint *ep);
+
/* Write slices out to the socket.
If the connection is ready for more data after the end of the call, it
diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c
index cf0fe736a0..6a63c4d1d1 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_linux.c
@@ -57,6 +57,7 @@
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
+#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
@@ -113,9 +114,7 @@ struct grpc_fd {
grpc_closure *read_closure;
grpc_closure *write_closure;
- /* The polling island to which this fd belongs to and the mutex protecting the
- the field */
- gpr_mu pi_mu;
+ /* The polling island to which this fd belongs (protected by mu) */
struct polling_island *polling_island;
struct grpc_fd *freelist_next;
@@ -152,16 +151,17 @@ static void fd_global_shutdown(void);
* Polling island Declarations
*/
-// #define GRPC_PI_REF_COUNT_DEBUG
+//#define GRPC_PI_REF_COUNT_DEBUG
#ifdef GRPC_PI_REF_COUNT_DEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
-#define PI_UNREF(p, r) pi_unref_dbg((p), (r), __FILE__, __LINE__)
+#define PI_UNREF(exec_ctx, p, r) \
+ pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
#else /* defined(GRPC_PI_REF_COUNT_DEBUG) */
#define PI_ADD_REF(p, r) pi_add_ref((p))
-#define PI_UNREF(p, r) pi_unref((p))
+#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
#endif /* !defined(GRPC_PI_REF_COUNT_DEBUG) */
@@ -172,7 +172,7 @@ typedef struct polling_island {
Once the ref count becomes zero, this structure is destroyed which means
we should ensure that there is never a scenario where a PI_ADD_REF() is
racing with a PI_UNREF() that just made the ref_count zero. */
- gpr_refcount ref_count;
+ gpr_atm ref_count;
/* Pointer to the polling_island this merged into.
* merged_to value is only set once in polling_island's lifetime (and that too
@@ -184,6 +184,9 @@ typedef struct polling_island {
* (except mu and ref_count) are invalid and must be ignored. */
gpr_atm merged_to;
+ /* The workqueue associated with this polling island */
+ grpc_workqueue *workqueue;
+
/* The fd of the underlying epoll set */
int epoll_fd;
@@ -191,11 +194,6 @@ typedef struct polling_island {
size_t fd_cnt;
size_t fd_capacity;
grpc_fd **fds;
-
- /* Polling islands that are no longer needed are kept in a freelist so that
- they can be reused. This field points to the next polling island in the
- free list */
- struct polling_island *next_free;
} polling_island;
/*******************************************************************************
@@ -253,13 +251,14 @@ struct grpc_pollset_set {
* Common helpers
*/
-static void append_error(grpc_error **composite, grpc_error *error,
+static bool append_error(grpc_error **composite, grpc_error *error,
const char *desc) {
- if (error == GRPC_ERROR_NONE) return;
+ if (error == GRPC_ERROR_NONE) return true;
if (*composite == GRPC_ERROR_NONE) {
*composite = GRPC_ERROR_CREATE(desc);
}
*composite = grpc_error_add_child(*composite, error);
+ return false;
}
/*******************************************************************************
@@ -275,11 +274,8 @@ static void append_error(grpc_error **composite, grpc_error *error,
threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
static grpc_wakeup_fd polling_island_wakeup_fd;
-/* Polling island freelist */
-static gpr_mu g_pi_freelist_mu;
-static polling_island *g_pi_freelist = NULL;
-
-static void polling_island_delete(); /* Forward declaration */
+/* Forward declaration */
+static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
#ifdef GRPC_TSAN
/* Currently TSAN may incorrectly flag data races between epoll_ctl and
@@ -293,28 +289,35 @@ gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
#ifdef GRPC_PI_REF_COUNT_DEBUG
-void pi_add_ref(polling_island *pi);
-void pi_unref(polling_island *pi);
+static void pi_add_ref(polling_island *pi);
+static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
-void pi_add_ref_dbg(polling_island *pi, char *reason, char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&(pi->ref_count.count));
+static void pi_add_ref_dbg(polling_island *pi, char *reason, char *file,
+ int line) {
+ long old_cnt = gpr_atm_acq_load(&pi->ref_count);
pi_add_ref(pi);
gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
(void *)pi, old_cnt, old_cnt + 1, reason, file, line);
}
-void pi_unref_dbg(polling_island *pi, char *reason, char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&(pi->ref_count.count));
- pi_unref(pi);
+static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
+ char *reason, char *file, int line) {
+ long old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ pi_unref(exec_ctx, pi);
gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
(void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
}
#endif
-void pi_add_ref(polling_island *pi) { gpr_ref(&pi->ref_count); }
+static void pi_add_ref(polling_island *pi) {
+ gpr_atm_no_barrier_fetch_add(&pi->ref_count, 1);
+}
-void pi_unref(polling_island *pi) {
- /* If ref count went to zero, delete the polling island.
+static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
+ /* If ref count went to one, we're back to just the workqueue owning a ref.
+ Unref the workqueue to break the loop.
+
+ If ref count went to zero, delete the polling island.
Note that this deletion need not be done under a lock. Once the ref count goes
to zero, we are guaranteed that no one else holds a reference to the
polling island (and that there is no racing pi_add_ref() call either).
@@ -322,12 +325,20 @@ void pi_unref(polling_island *pi) {
Also, if we are deleting the polling island and the merged_to field is
non-empty, we should remove a ref to the merged_to polling island
*/
- if (gpr_unref(&pi->ref_count)) {
- polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- polling_island_delete(pi);
- if (next != NULL) {
- PI_UNREF(next, "pi_delete"); /* Recursive call */
+ switch (gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
+ case 2: /* last external ref: the only one now owned is by the workqueue */
+ GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island");
+ break;
+ case 1: {
+ polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
+ polling_island_delete(exec_ctx, pi);
+ if (next != NULL) {
+ PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
+ }
+ break;
}
+ case 0:
+ GPR_UNREACHABLE_CODE(return );
}
}
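An illustrative sketch (assumption) of the ref discipline this change relies on: each user brackets access with PI_ADD_REF/PI_UNREF, and the workqueue's wakeup fd holds the one ref accounted for by the `case 2:` branch above.

/* Sketch only: bracketed use of a polling island by a pollset or fd. */
static void example_use_polling_island(grpc_exec_ctx *exec_ctx,
                                       polling_island *pi) {
  PI_ADD_REF(pi, "example");
  /* ... poll pi->epoll_fd, merge islands, etc. ... */
  PI_UNREF(exec_ctx, pi, "example");
}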
@@ -462,69 +473,68 @@ static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd,
}
/* Might return NULL in case of an error */
-static polling_island *polling_island_create(grpc_fd *initial_fd,
+static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
+ grpc_fd *initial_fd,
grpc_error **error) {
polling_island *pi = NULL;
- char *err_msg;
const char *err_desc = "polling_island_create";
- /* Try to get one from the polling island freelist */
- gpr_mu_lock(&g_pi_freelist_mu);
- if (g_pi_freelist != NULL) {
- pi = g_pi_freelist;
- g_pi_freelist = g_pi_freelist->next_free;
- pi->next_free = NULL;
- }
- gpr_mu_unlock(&g_pi_freelist_mu);
+ *error = GRPC_ERROR_NONE;
- /* Create new polling island if we could not get one from the free list */
- if (pi == NULL) {
- pi = gpr_malloc(sizeof(*pi));
- gpr_mu_init(&pi->mu);
- pi->fd_cnt = 0;
- pi->fd_capacity = 0;
- pi->fds = NULL;
- }
+ pi = gpr_malloc(sizeof(*pi));
+ gpr_mu_init(&pi->mu);
+ pi->fd_cnt = 0;
+ pi->fd_capacity = 0;
+ pi->fds = NULL;
+ pi->epoll_fd = -1;
+ pi->workqueue = NULL;
- gpr_ref_init(&pi->ref_count, 0);
+ gpr_atm_rel_store(&pi->ref_count, 0);
gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (pi->epoll_fd < 0) {
- gpr_asprintf(&err_msg, "epoll_create1 failed with error %d (%s)", errno,
- strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- } else {
- polling_island_add_wakeup_fd_locked(pi, &grpc_global_wakeup_fd, error);
- pi->next_free = NULL;
+ append_error(error, GRPC_OS_ERROR(errno, "epoll_create1"), err_desc);
+ goto done;
+ }
- if (initial_fd != NULL) {
- /* Lock the polling island here just in case we got this structure from
- the freelist and the polling island lock was not released yet (by the
- code that adds the polling island to the freelist) */
- gpr_mu_lock(&pi->mu);
- polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
- gpr_mu_unlock(&pi->mu);
- }
+ polling_island_add_wakeup_fd_locked(pi, &grpc_global_wakeup_fd, error);
+
+ if (initial_fd != NULL) {
+ polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
+ }
+
+ if (append_error(error, grpc_workqueue_create(exec_ctx, &pi->workqueue),
+ err_desc) &&
+ *error == GRPC_ERROR_NONE) {
+ polling_island_add_fds_locked(pi, &pi->workqueue->wakeup_read_fd, 1, true,
+ error);
+ GPR_ASSERT(pi->workqueue->wakeup_read_fd->polling_island == NULL);
+ pi->workqueue->wakeup_read_fd->polling_island = pi;
+ PI_ADD_REF(pi, "fd");
}
+done:
+ if (*error != GRPC_ERROR_NONE) {
+ if (pi->workqueue != NULL) {
+ GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island");
+ }
+ polling_island_delete(exec_ctx, pi);
+ pi = NULL;
+ }
return pi;
}
-static void polling_island_delete(polling_island *pi) {
+static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
GPR_ASSERT(pi->fd_cnt == 0);
- gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
-
- close(pi->epoll_fd);
- pi->epoll_fd = -1;
-
- gpr_mu_lock(&g_pi_freelist_mu);
- pi->next_free = g_pi_freelist;
- g_pi_freelist = pi;
- gpr_mu_unlock(&g_pi_freelist_mu);
+ if (pi->epoll_fd >= 0) {
+ close(pi->epoll_fd);
+ }
+ gpr_mu_destroy(&pi->mu);
+ gpr_free(pi->fds);
+ gpr_free(pi);
}
/* Attempts to get the last polling island in the linked list (linked by the
@@ -704,9 +714,6 @@ static polling_island *polling_island_merge(polling_island *p,
static grpc_error *polling_island_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
- gpr_mu_init(&g_pi_freelist_mu);
- g_pi_freelist = NULL;
-
error = grpc_wakeup_fd_init(&polling_island_wakeup_fd);
if (error == GRPC_ERROR_NONE) {
error = grpc_wakeup_fd_wakeup(&polling_island_wakeup_fd);
@@ -716,18 +723,6 @@ static grpc_error *polling_island_global_init() {
}
static void polling_island_global_shutdown() {
- polling_island *next;
- gpr_mu_lock(&g_pi_freelist_mu);
- gpr_mu_unlock(&g_pi_freelist_mu);
- while (g_pi_freelist != NULL) {
- next = g_pi_freelist->next_free;
- gpr_mu_destroy(&g_pi_freelist->mu);
- gpr_free(g_pi_freelist->fds);
- gpr_free(g_pi_freelist);
- g_pi_freelist = next;
- }
- gpr_mu_destroy(&g_pi_freelist_mu);
-
grpc_wakeup_fd_destroy(&polling_island_wakeup_fd);
}
@@ -845,7 +840,6 @@ static grpc_fd *fd_create(int fd, const char *name) {
if (new_fd == NULL) {
new_fd = gpr_malloc(sizeof(grpc_fd));
gpr_mu_init(&new_fd->mu);
- gpr_mu_init(&new_fd->pi_mu);
}
/* Note: It is not really needed to get the new_fd->mu lock here. If this is a
@@ -896,6 +890,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
const char *reason) {
bool is_fd_closed = false;
grpc_error *error = GRPC_ERROR_NONE;
+ polling_island *unref_pi = NULL;
gpr_mu_lock(&fd->mu);
fd->on_done_closure = on_done;
@@ -923,21 +918,26 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- Unlock the latest polling island
- Set fd->polling_island to NULL (but remove the ref on the polling island
before doing this.) */
- gpr_mu_lock(&fd->pi_mu);
if (fd->polling_island != NULL) {
polling_island *pi_latest = polling_island_lock(fd->polling_island);
polling_island_remove_fd_locked(pi_latest, fd, is_fd_closed, &error);
gpr_mu_unlock(&pi_latest->mu);
- PI_UNREF(fd->polling_island, "fd_orphan");
+ unref_pi = fd->polling_island;
fd->polling_island = NULL;
}
- gpr_mu_unlock(&fd->pi_mu);
grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, error, NULL);
gpr_mu_unlock(&fd->mu);
UNREF_BY(fd, 2, reason); /* Drop the reference */
+ if (unref_pi != NULL) {
+ /* Unref stale polling island here, outside the fd lock above.
+ The polling island owns a workqueue which owns an fd, and unreffing
+ inside the lock can cause an eventual lock loop that makes TSAN very
+ unhappy. */
+ PI_UNREF(exec_ctx, unref_pi, "fd_orphan");
+ }
GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
}
@@ -1037,6 +1037,17 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
gpr_mu_unlock(&fd->mu);
}
+static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
+ gpr_mu_lock(&fd->mu);
+ grpc_workqueue *workqueue = NULL;
+ if (fd->polling_island != NULL) {
+ workqueue =
+ GRPC_WORKQUEUE_REF(fd->polling_island->workqueue, "get_workqueue");
+ }
+ gpr_mu_unlock(&fd->mu);
+ return workqueue;
+}
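A usage sketch (assumption): the workqueue reference returned by fd_get_workqueue() belongs to the caller and must eventually be released.

/* Sketch only: borrow the fd's workqueue, then drop the acquired ref. */
static void example_peek_fd_workqueue(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  grpc_workqueue *wq = fd_get_workqueue(fd);
  if (wq != NULL) {
    /* ... stash it, or enqueue a closure on it ... */
    GRPC_WORKQUEUE_UNREF(exec_ctx, wq, "example");
  }
}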
+
/*******************************************************************************
* Pollset Definitions
*/
@@ -1227,9 +1238,10 @@ static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
gpr_mu_unlock(&fd->mu);
}
-static void pollset_release_polling_island(grpc_pollset *ps, char *reason) {
+static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *ps, char *reason) {
if (ps->polling_island != NULL) {
- PI_UNREF(ps->polling_island, reason);
+ PI_UNREF(exec_ctx, ps->polling_island, reason);
}
ps->polling_island = NULL;
}
@@ -1242,7 +1254,7 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
pollset->finish_shutdown_called = true;
/* Release the ref and set pollset->polling_island to NULL */
- pollset_release_polling_island(pollset, "ps_shutdown");
+ pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
}
@@ -1281,7 +1293,7 @@ static void pollset_reset(grpc_pollset *pollset) {
pollset->finish_shutdown_called = false;
pollset->kicked_without_pollers = false;
pollset->shutdown_done = NULL;
- pollset_release_polling_island(pollset, "ps_reset");
+ GPR_ASSERT(pollset->polling_island == NULL);
}
#define GRPC_EPOLL_MAX_EVENTS 1000
@@ -1309,7 +1321,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
this function (i.e. pollset_work_and_unlock()) is called */
if (pollset->polling_island == NULL) {
- pollset->polling_island = polling_island_create(NULL, error);
+ pollset->polling_island = polling_island_create(exec_ctx, NULL, error);
if (pollset->polling_island == NULL) {
GPR_TIMER_END("pollset_work_and_unlock", 0);
return; /* Fatal error. We cannot continue */
@@ -1329,7 +1341,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
/* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the
polling island to be deleted */
PI_ADD_REF(pi, "ps");
- PI_UNREF(pollset->polling_island, "ps");
+ PI_UNREF(exec_ctx, pollset->polling_island, "ps");
pollset->polling_island = pi;
}
@@ -1400,7 +1412,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
that we got before releasing the polling island lock). This is because
pollset->polling_island pointer might get updated in other parts of the
code when there is an island merge while we are doing epoll_wait() above */
- PI_UNREF(pi, "ps_work");
+ PI_UNREF(exec_ctx, pi, "ps_work");
GPR_TIMER_END("pollset_work_and_unlock", 0);
}
@@ -1517,10 +1529,11 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_error *error = GRPC_ERROR_NONE;
gpr_mu_lock(&pollset->mu);
- gpr_mu_lock(&fd->pi_mu);
+ gpr_mu_lock(&fd->mu);
polling_island *pi_new = NULL;
+retry:
/* 1) If fd->polling_island and pollset->polling_island are both non-NULL and
* equal, do nothing.
* 2) If fd->polling_island and pollset->polling_island are both NULL, create
@@ -1535,15 +1548,44 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
* polling_island fields in both fd and pollset to point to the merged
* polling island.
*/
+
+ if (fd->orphaned) {
+ gpr_mu_unlock(&fd->mu);
+ gpr_mu_unlock(&pollset->mu);
+ /* early out */
+ return;
+ }
+
if (fd->polling_island == pollset->polling_island) {
pi_new = fd->polling_island;
if (pi_new == NULL) {
- pi_new = polling_island_create(fd, &error);
-
- GRPC_POLLING_TRACE(
- "pollset_add_fd: Created new polling island. pi_new: %p (fd: %d, "
- "pollset: %p)",
- (void *)pi_new, fd->fd, (void *)pollset);
+ /* Unlock before creating a new polling island: the polling island will
+ create a workqueue which creates a file descriptor, and holding an fd
+ lock here can eventually cause a loop to appear to TSAN (making it
+ unhappy). We don't think it's a real loop (there's an epoch point where
+ that loop possibility disappears), but the advantages of keeping TSAN
+ happy outweigh any performance advantage we might have by keeping the
+ lock held. */
+ gpr_mu_unlock(&fd->mu);
+ pi_new = polling_island_create(exec_ctx, fd, &error);
+ gpr_mu_lock(&fd->mu);
+ /* Need to reverify any assumptions made between the initial lock and
+ getting to this branch: if they've changed, we need to throw away our
+ work and figure things out again. */
+ if (fd->polling_island != NULL) {
+ GRPC_POLLING_TRACE(
+ "pollset_add_fd: Raced creating new polling island. pi_new: %p "
+ "(fd: %d, pollset: %p)",
+ (void *)pi_new, fd->fd, (void *)pollset);
+ PI_ADD_REF(pi_new, "dance_of_destruction");
+ PI_UNREF(exec_ctx, pi_new, "dance_of_destruction");
+ goto retry;
+ } else {
+ GRPC_POLLING_TRACE(
+ "pollset_add_fd: Created new polling island. pi_new: %p (fd: %d, "
+ "pollset: %p)",
+ (void *)pi_new, fd->fd, (void *)pollset);
+ }
}
} else if (fd->polling_island == NULL) {
pi_new = polling_island_lock(pollset->polling_island);
@@ -1579,7 +1621,7 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (fd->polling_island != pi_new) {
PI_ADD_REF(pi_new, "fd");
if (fd->polling_island != NULL) {
- PI_UNREF(fd->polling_island, "fd");
+ PI_UNREF(exec_ctx, fd->polling_island, "fd");
}
fd->polling_island = pi_new;
}
@@ -1587,13 +1629,15 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (pollset->polling_island != pi_new) {
PI_ADD_REF(pi_new, "ps");
if (pollset->polling_island != NULL) {
- PI_UNREF(pollset->polling_island, "ps");
+ PI_UNREF(exec_ctx, pollset->polling_island, "ps");
}
pollset->polling_island = pi_new;
}
- gpr_mu_unlock(&fd->pi_mu);
+ gpr_mu_unlock(&fd->mu);
gpr_mu_unlock(&pollset->mu);
+
+ GRPC_LOG_IF_ERROR("pollset_add_fd", error);
}
/*******************************************************************************
@@ -1744,9 +1788,9 @@ static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
void *grpc_fd_get_polling_island(grpc_fd *fd) {
polling_island *pi;
- gpr_mu_lock(&fd->pi_mu);
+ gpr_mu_lock(&fd->mu);
pi = fd->polling_island;
- gpr_mu_unlock(&fd->pi_mu);
+ gpr_mu_unlock(&fd->mu);
return pi;
}
@@ -1794,6 +1838,7 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
+ .fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
diff --git a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
index 9e306af5fa..c2107e5e39 100644
--- a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
@@ -725,6 +725,8 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
GRPC_FD_UNREF(fd, "poll");
}
+static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; }
+
/*******************************************************************************
* pollset_posix.c
*/
@@ -2006,6 +2008,7 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
+ .fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index 45c0a5e954..4b593f4b2c 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -617,6 +617,8 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
GRPC_FD_UNREF(fd, "poll");
}
+static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; }
+
/*******************************************************************************
* pollset_posix.c
*/
@@ -1234,6 +1236,7 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
+ .fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index a3c1e9db9a..6536672685 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -148,6 +148,10 @@ grpc_fd *grpc_fd_create(int fd, const char *name) {
return g_event_engine->fd_create(fd, name);
}
+grpc_workqueue *grpc_fd_get_workqueue(grpc_fd *fd) {
+ return g_event_engine->fd_get_workqueue(fd);
+}
+
int grpc_fd_wrapped_fd(grpc_fd *fd) {
return g_event_engine->fd_wrapped_fd(fd);
}
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 579c84ef70..c2aa1756ea 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -56,6 +56,7 @@ typedef struct grpc_event_engine_vtable {
void (*fd_notify_on_write)(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure);
bool (*fd_is_shutdown)(grpc_fd *fd);
+ grpc_workqueue *(*fd_get_workqueue)(grpc_fd *fd);
grpc_pollset *(*fd_get_read_notifier_pollset)(grpc_exec_ctx *exec_ctx,
grpc_fd *fd);
@@ -107,6 +108,9 @@ const char *grpc_get_poll_strategy_name();
This takes ownership of closing fd. */
grpc_fd *grpc_fd_create(int fd, const char *name);
+/* Get a workqueue that's associated with this fd */
+grpc_workqueue *grpc_fd_get_workqueue(grpc_fd *fd);
+
/* Return the wrapped fd, or -1 if it has been released or closed. */
int grpc_fd_wrapped_fd(grpc_fd *fd);
diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c
index c44aafcddf..ac7785ec13 100644
--- a/src/core/lib/iomgr/exec_ctx.c
+++ b/src/core/lib/iomgr/exec_ctx.c
@@ -37,6 +37,7 @@
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
+#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) {
@@ -85,14 +86,17 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error,
grpc_workqueue *offload_target_or_null) {
- GPR_ASSERT(offload_target_or_null == NULL);
- grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
+ if (offload_target_or_null == NULL) {
+ grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
+ } else {
+ grpc_workqueue_enqueue(exec_ctx, offload_target_or_null, closure, error);
+ GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
+ }
}
void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
grpc_closure_list *list,
grpc_workqueue *offload_target_or_null) {
- GPR_ASSERT(offload_target_or_null == NULL);
grpc_closure_list_move(list, &exec_ctx->closure_list);
}
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index 38f27d9b13..917f332f03 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -93,7 +93,11 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
/** Finish any pending work for a grpc_exec_ctx. Must be called before
* the instance is destroyed, or work may be lost. */
void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx);
-/** Add a closure to be executed at the next flush/finish point */
+/** Add a closure to be executed in the future.
+ If \a offload_target_or_null is NULL, the closure will be executed at the
+ next exec_ctx.{finish,flush} point.
+ If \a offload_target_or_null is non-NULL, the closure will be scheduled
+ against the workqueue, and a reference to the workqueue will be consumed. */
void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error,
grpc_workqueue *offload_target_or_null);
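A sketch of the two scheduling modes described above, written as a hypothetical caller: a NULL target runs the closure at the next flush/finish point, while a non-NULL target hands it to the workqueue and consumes the caller's workqueue reference.

/* Sketch only: schedule locally or offload, per the doc comment above. */
static void example_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                          grpc_workqueue *wq_or_null /* ref owned by caller */) {
  if (wq_or_null == NULL) {
    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
  } else {
    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, wq_or_null);
    /* the workqueue reference has now been consumed; do not unref it here */
  }
}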
diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c
index 89292a153e..d67d388b8c 100644
--- a/src/core/lib/iomgr/iomgr.c
+++ b/src/core/lib/iomgr/iomgr.c
@@ -45,6 +45,7 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
@@ -62,6 +63,7 @@ void grpc_iomgr_init(void) {
grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
g_root_object.next = g_root_object.prev = &g_root_object;
g_root_object.name = "root";
+ grpc_network_status_init();
grpc_iomgr_platform_init();
}
@@ -140,6 +142,7 @@ void grpc_iomgr_shutdown(void) {
grpc_iomgr_platform_shutdown();
grpc_exec_ctx_global_shutdown();
+ grpc_network_status_shutdown();
gpr_mu_destroy(&g_mu);
gpr_cv_destroy(&g_rcv);
}
diff --git a/src/core/lib/iomgr/network_status_tracker.c b/src/core/lib/iomgr/network_status_tracker.c
index 38a1c9b7d4..90c074b007 100644
--- a/src/core/lib/iomgr/network_status_tracker.c
+++ b/src/core/lib/iomgr/network_status_tracker.c
@@ -42,27 +42,21 @@ typedef struct endpoint_ll_node {
static endpoint_ll_node *head = NULL;
static gpr_mu g_endpoint_mutex;
-static bool g_init_done = false;
-void grpc_initialize_network_status_monitor() {
- g_init_done = true;
- gpr_mu_init(&g_endpoint_mutex);
- // TODO(makarandd): Install callback with OS to monitor network status.
-}
-
-void grpc_destroy_network_status_monitor() {
- for (endpoint_ll_node *curr = head; curr != NULL;) {
- endpoint_ll_node *next = curr->next;
- gpr_free(curr);
- curr = next;
+void grpc_network_status_shutdown(void) {
+ if (head != NULL) {
+ gpr_log(GPR_ERROR,
+ "Memory leaked as all network endpoints were not shut down");
}
gpr_mu_destroy(&g_endpoint_mutex);
}
+void grpc_network_status_init(void) {
+ gpr_mu_init(&g_endpoint_mutex);
+ // TODO(makarandd): Install callback with OS to monitor network status.
+}
+
void grpc_network_status_register_endpoint(grpc_endpoint *ep) {
- if (!g_init_done) {
- grpc_initialize_network_status_monitor();
- }
gpr_mu_lock(&g_endpoint_mutex);
if (head == NULL) {
head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node));
diff --git a/src/core/lib/iomgr/network_status_tracker.h b/src/core/lib/iomgr/network_status_tracker.h
index 74a1aa8135..67cb645f44 100644
--- a/src/core/lib/iomgr/network_status_tracker.h
+++ b/src/core/lib/iomgr/network_status_tracker.h
@@ -35,7 +35,11 @@
#define GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H
#include "src/core/lib/iomgr/endpoint.h"
+void grpc_network_status_init(void);
+void grpc_network_status_shutdown(void);
+
void grpc_network_status_register_endpoint(grpc_endpoint *ep);
void grpc_network_status_unregister_endpoint(grpc_endpoint *ep);
void grpc_network_status_shutdown_all_endpoints();
+
#endif /* GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H */
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 2ab45e33ce..ec21e03944 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -284,7 +284,7 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
/* returns true if done, false if pending; if returning true, *error is set */
-#define MAX_WRITE_IOVEC 16
+#define MAX_WRITE_IOVEC 1024
static bool tcp_flush(grpc_tcp *tcp, grpc_error **error) {
struct msghdr msg;
struct iovec iov[MAX_WRITE_IOVEC];
@@ -450,9 +450,19 @@ static char *tcp_get_peer(grpc_endpoint *ep) {
return gpr_strdup(tcp->peer_string);
}
-static const grpc_endpoint_vtable vtable = {
- tcp_read, tcp_write, tcp_add_to_pollset, tcp_add_to_pollset_set,
- tcp_shutdown, tcp_destroy, tcp_get_peer};
+static grpc_workqueue *tcp_get_workqueue(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return grpc_fd_get_workqueue(tcp->em_fd);
+}
+
+static const grpc_endpoint_vtable vtable = {tcp_read,
+ tcp_write,
+ tcp_get_workqueue,
+ tcp_add_to_pollset,
+ tcp_add_to_pollset_set,
+ tcp_shutdown,
+ tcp_destroy,
+ tcp_get_peer};
grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
const char *peer_string) {
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index 7b713723ce..38ebd2dbcb 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -491,7 +491,8 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
}
for (unsigned i = 0; i < count; i++) {
- int fd, port;
+ int fd = -1;
+ int port = -1;
grpc_dualstack_mode dsmode;
err = grpc_create_dualstack_socket(&listener->addr.sockaddr, SOCK_STREAM, 0,
&dsmode, &fd);
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c
index 37ab59021e..35054c42b5 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.c
@@ -389,9 +389,16 @@ static char *win_get_peer(grpc_endpoint *ep) {
return gpr_strdup(tcp->peer_string);
}
-static grpc_endpoint_vtable vtable = {
- win_read, win_write, win_add_to_pollset, win_add_to_pollset_set,
- win_shutdown, win_destroy, win_get_peer};
+static grpc_workqueue *win_get_workqueue(grpc_endpoint *ep) { return NULL; }
+
+static grpc_endpoint_vtable vtable = {win_read,
+ win_write,
+ win_get_workqueue,
+ win_add_to_pollset,
+ win_add_to_pollset_set,
+ win_shutdown,
+ win_destroy,
+ win_get_peer};
grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
diff --git a/src/core/lib/iomgr/workqueue.h b/src/core/lib/iomgr/workqueue.h
index 5cc40eea50..7156e490d7 100644
--- a/src/core/lib/iomgr/workqueue.h
+++ b/src/core/lib/iomgr/workqueue.h
@@ -38,6 +38,7 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/pollset.h"
+#include "src/core/lib/iomgr/pollset_set.h"
#ifdef GPR_POSIX_SOCKET
#include "src/core/lib/iomgr/workqueue_posix.h"
@@ -49,35 +50,45 @@
/* grpc_workqueue is forward declared in exec_ctx.h */
-/** Create a work queue */
-grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
- grpc_workqueue **workqueue);
-
+/* Deprecated: do not use.
+ This has *already* been removed in a future commit. */
void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
-#define GRPC_WORKQUEUE_REFCOUNT_DEBUG
+/* Reference counting functions. Always use the macros
+ (GRPC_WORKQUEUE_{REF,UNREF}).
+
+ Pass in a descriptive reason string for reffing/unreffing as the last
+ argument to each macro. When GRPC_WORKQUEUE_REFCOUNT_DEBUG is defined, that
+ string will be printed alongside the refcount. When it is not defined, the
+ string will be discarded at compilation time. */
+
+//#define GRPC_WORKQUEUE_REFCOUNT_DEBUG
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#define GRPC_WORKQUEUE_REF(p, r) \
- grpc_workqueue_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_WORKQUEUE_UNREF(cl, p, r) \
- grpc_workqueue_unref((cl), (p), __FILE__, __LINE__, (r))
+ (grpc_workqueue_ref((p), __FILE__, __LINE__, (r)), (p))
+#define GRPC_WORKQUEUE_UNREF(exec_ctx, p, r) \
+ grpc_workqueue_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
const char *reason);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason);
#else
-#define GRPC_WORKQUEUE_REF(p, r) grpc_workqueue_ref((p))
+#define GRPC_WORKQUEUE_REF(p, r) (grpc_workqueue_ref((p)), (p))
#define GRPC_WORKQUEUE_UNREF(cl, p, r) grpc_workqueue_unref((cl), (p))
void grpc_workqueue_ref(grpc_workqueue *workqueue);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
#endif
-/** Bind this workqueue to a pollset */
-void grpc_workqueue_add_to_pollset(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue,
- grpc_pollset *pollset);
+/** Add a work item to a workqueue. Items added to a work queue will be started
+ in approximately the order they were enqueued, on some thread that may or
+ may not be the current thread. Successive closures enqueued onto a workqueue
+ MAY be executed concurrently.
+
+ It is generally more expensive to add a closure to a workqueue than to the
+ execution context, both in terms of CPU work and in execution latency.
-/** Add a work item to a workqueue */
+ Use work queues when it's important that other threads be given a chance to
+ tackle some workload. */
void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
grpc_closure *closure, grpc_error *error);
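A sketch (assumption) of direct enqueueing: unlike grpc_exec_ctx_sched with an offload target, grpc_workqueue_enqueue does not consume the caller's workqueue reference, so the caller keeps (and here immediately drops) its own ref.

/* Sketch only: take a ref, enqueue, then release the ref we took. */
static void example_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *wq,
                            grpc_closure *closure) {
  grpc_workqueue *owned = GRPC_WORKQUEUE_REF(wq, "example");
  grpc_workqueue_enqueue(exec_ctx, owned, closure, GRPC_ERROR_NONE);
  GRPC_WORKQUEUE_UNREF(exec_ctx, owned, "example");
}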
diff --git a/src/core/lib/iomgr/workqueue_posix.c b/src/core/lib/iomgr/workqueue_posix.c
index 45e0f6063b..e0d6dac230 100644
--- a/src/core/lib/iomgr/workqueue_posix.c
+++ b/src/core/lib/iomgr/workqueue_posix.c
@@ -70,7 +70,7 @@ grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
static void workqueue_destroy(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {
- GPR_ASSERT(grpc_closure_list_empty(workqueue->closure_list));
+ grpc_exec_ctx_enqueue_list(exec_ctx, &workqueue->closure_list, NULL);
grpc_fd_shutdown(exec_ctx, workqueue->wakeup_read_fd);
}
@@ -100,12 +100,6 @@ void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
}
}
-void grpc_workqueue_add_to_pollset(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue,
- grpc_pollset *pollset) {
- grpc_pollset_add_fd(exec_ctx, pollset, workqueue->wakeup_read_fd);
-}
-
void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
gpr_mu_lock(&workqueue->mu);
grpc_exec_ctx_enqueue_list(exec_ctx, &workqueue->closure_list, NULL);
diff --git a/src/core/lib/iomgr/workqueue_posix.h b/src/core/lib/iomgr/workqueue_posix.h
index dcb47e7b59..0f26ba58e2 100644
--- a/src/core/lib/iomgr/workqueue_posix.h
+++ b/src/core/lib/iomgr/workqueue_posix.h
@@ -50,4 +50,9 @@ struct grpc_workqueue {
grpc_closure read_closure;
};
+/** Create a work queue. Returns an error if creation fails. If creation
+ succeeds, sets *workqueue to point to it. */
+grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue **workqueue);
+
#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H */
diff --git a/src/core/lib/iomgr/workqueue_windows.c b/src/core/lib/iomgr/workqueue_windows.c
index 275f040b1c..23e2dea185 100644
--- a/src/core/lib/iomgr/workqueue_windows.c
+++ b/src/core/lib/iomgr/workqueue_windows.c
@@ -37,4 +37,26 @@
#include "src/core/lib/iomgr/workqueue.h"
+// Minimal implementation of grpc_workqueue for Windows
+// Works by directly enqueuing workqueue items onto the current execution
+// context, which is at least correct, if not performant or in the spirit of
+// workqueues.
+
+void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
+
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
+ const char *reason) {}
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason) {}
+#else
+void grpc_workqueue_ref(grpc_workqueue *workqueue) {}
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
+#endif
+
+void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ grpc_closure *closure, grpc_error *error) {
+ grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+}
+
#endif /* GPR_WINDOWS */
diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c
index 7650d68e89..bc50f9d1b0 100644
--- a/src/core/lib/security/transport/secure_endpoint.c
+++ b/src/core/lib/security/transport/secure_endpoint.c
@@ -360,11 +360,19 @@ static char *endpoint_get_peer(grpc_endpoint *secure_ep) {
return grpc_endpoint_get_peer(ep->wrapped_ep);
}
-static const grpc_endpoint_vtable vtable = {
- endpoint_read, endpoint_write,
- endpoint_add_to_pollset, endpoint_add_to_pollset_set,
- endpoint_shutdown, endpoint_destroy,
- endpoint_get_peer};
+static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) {
+ secure_endpoint *ep = (secure_endpoint *)secure_ep;
+ return grpc_endpoint_get_workqueue(ep->wrapped_ep);
+}
+
+static const grpc_endpoint_vtable vtable = {endpoint_read,
+ endpoint_write,
+ endpoint_get_workqueue,
+ endpoint_add_to_pollset,
+ endpoint_add_to_pollset_set,
+ endpoint_shutdown,
+ endpoint_destroy,
+ endpoint_get_peer};
grpc_endpoint *grpc_secure_endpoint_create(
struct tsi_frame_protector *protector, grpc_endpoint *transport,
diff --git a/src/core/lib/support/time.c b/src/core/lib/support/time.c
index 57f8331194..5a7d043aed 100644
--- a/src/core/lib/support/time.c
+++ b/src/core/lib/support/time.c
@@ -80,103 +80,67 @@ gpr_timespec gpr_inf_past(gpr_clock_type type) {
return out;
}
-/* TODO(ctiller): consider merging _nanos, _micros, _millis into a single
- function for maintainability. Similarly for _seconds, _minutes, and _hours */
-
-gpr_timespec gpr_time_from_nanos(int64_t ns, gpr_clock_type type) {
- gpr_timespec result;
- result.clock_type = type;
- if (ns == INT64_MAX) {
- result = gpr_inf_future(type);
- } else if (ns == INT64_MIN) {
- result = gpr_inf_past(type);
- } else if (ns >= 0) {
- result.tv_sec = ns / GPR_NS_PER_SEC;
- result.tv_nsec = (int32_t)(ns - result.tv_sec * GPR_NS_PER_SEC);
+static gpr_timespec to_seconds_from_sub_second_time(int64_t time_in_units,
+ int64_t units_per_sec,
+ gpr_clock_type type) {
+ gpr_timespec out;
+ if (time_in_units == INT64_MAX) {
+ out = gpr_inf_future(type);
+ } else if (time_in_units == INT64_MIN) {
+ out = gpr_inf_past(type);
} else {
- /* Calculation carefully formulated to avoid any possible under/overflow. */
- result.tv_sec = (-(999999999 - (ns + GPR_NS_PER_SEC)) / GPR_NS_PER_SEC) - 1;
- result.tv_nsec = (int32_t)(ns - result.tv_sec * GPR_NS_PER_SEC);
+ if (time_in_units >= 0) {
+ out.tv_sec = time_in_units / units_per_sec;
+ } else {
+ out.tv_sec = (-((units_per_sec - 1) - (time_in_units + units_per_sec)) /
+ units_per_sec) -
+ 1;
+ }
+ out.tv_nsec = (int32_t)((time_in_units - out.tv_sec * units_per_sec) *
+ GPR_NS_PER_SEC / units_per_sec);
+ out.clock_type = type;
}
- return result;
+ return out;
}
-gpr_timespec gpr_time_from_micros(int64_t us, gpr_clock_type type) {
- gpr_timespec result;
- result.clock_type = type;
- if (us == INT64_MAX) {
- result = gpr_inf_future(type);
- } else if (us == INT64_MIN) {
- result = gpr_inf_past(type);
- } else if (us >= 0) {
- result.tv_sec = us / 1000000;
- result.tv_nsec = (int32_t)((us - result.tv_sec * 1000000) * 1000);
+static gpr_timespec to_seconds_from_above_second_time(int64_t time_in_units,
+ int64_t secs_per_unit,
+ gpr_clock_type type) {
+ gpr_timespec out;
+ if (time_in_units >= INT64_MAX / secs_per_unit) {
+ out = gpr_inf_future(type);
+ } else if (time_in_units <= INT64_MIN / secs_per_unit) {
+ out = gpr_inf_past(type);
} else {
- /* Calculation carefully formulated to avoid any possible under/overflow. */
- result.tv_sec = (-(999999 - (us + 1000000)) / 1000000) - 1;
- result.tv_nsec = (int32_t)((us - result.tv_sec * 1000000) * 1000);
+ out.tv_sec = time_in_units * secs_per_unit;
+ out.tv_nsec = 0;
+ out.clock_type = type;
}
- return result;
+ return out;
+}
+
+gpr_timespec gpr_time_from_nanos(int64_t ns, gpr_clock_type type) {
+ return to_seconds_from_sub_second_time(ns, GPR_NS_PER_SEC, type);
+}
+
+gpr_timespec gpr_time_from_micros(int64_t us, gpr_clock_type type) {
+ return to_seconds_from_sub_second_time(us, GPR_US_PER_SEC, type);
}
gpr_timespec gpr_time_from_millis(int64_t ms, gpr_clock_type type) {
- gpr_timespec result;
- result.clock_type = type;
- if (ms == INT64_MAX) {
- result = gpr_inf_future(type);
- } else if (ms == INT64_MIN) {
- result = gpr_inf_past(type);
- } else if (ms >= 0) {
- result.tv_sec = ms / 1000;
- result.tv_nsec = (int32_t)((ms - result.tv_sec * 1000) * 1000000);
- } else {
- /* Calculation carefully formulated to avoid any possible under/overflow. */
- result.tv_sec = (-(999 - (ms + 1000)) / 1000) - 1;
- result.tv_nsec = (int32_t)((ms - result.tv_sec * 1000) * 1000000);
- }
- return result;
+ return to_seconds_from_sub_second_time(ms, GPR_MS_PER_SEC, type);
}
gpr_timespec gpr_time_from_seconds(int64_t s, gpr_clock_type type) {
- gpr_timespec result;
- result.clock_type = type;
- if (s == INT64_MAX) {
- result = gpr_inf_future(type);
- } else if (s == INT64_MIN) {
- result = gpr_inf_past(type);
- } else {
- result.tv_sec = s;
- result.tv_nsec = 0;
- }
- return result;
+ return to_seconds_from_sub_second_time(s, 1, type);
}
gpr_timespec gpr_time_from_minutes(int64_t m, gpr_clock_type type) {
- gpr_timespec result;
- result.clock_type = type;
- if (m >= INT64_MAX / 60) {
- result = gpr_inf_future(type);
- } else if (m <= INT64_MIN / 60) {
- result = gpr_inf_past(type);
- } else {
- result.tv_sec = m * 60;
- result.tv_nsec = 0;
- }
- return result;
+ return to_seconds_from_above_second_time(m, 60, type);
}
gpr_timespec gpr_time_from_hours(int64_t h, gpr_clock_type type) {
- gpr_timespec result;
- result.clock_type = type;
- if (h >= INT64_MAX / 3600) {
- result = gpr_inf_future(type);
- } else if (h <= INT64_MIN / 3600) {
- result = gpr_inf_past(type);
- } else {
- result.tv_sec = h * 3600;
- result.tv_nsec = 0;
- }
- return result;
+ return to_seconds_from_above_second_time(h, 3600, type);
}
gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) {
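
The consolidated helper's negative-input handling is the subtle part of this refactor: tv_sec is floored to the previous second so tv_nsec always lands in [0, 1e9). The standalone sketch below mirrors the finite branch of to_seconds_from_sub_second_time using local stand-in types and constants (it is not built against the gRPC headers); for instance, -1,500,000 microseconds normalizes to tv_sec = -2, tv_nsec = 500,000,000.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the gpr types/constants used by the helper. */
typedef struct {
  int64_t tv_sec;
  int32_t tv_nsec;
} timespec_sketch;

#define NS_PER_SEC 1000000000
#define US_PER_SEC 1000000

/* Mirror of to_seconds_from_sub_second_time's finite branch: negative inputs
   are floored to the previous second so tv_nsec stays in [0, 1e9). */
static timespec_sketch from_sub_second(int64_t time_in_units,
                                       int64_t units_per_sec) {
  timespec_sketch out;
  if (time_in_units >= 0) {
    out.tv_sec = time_in_units / units_per_sec;
  } else {
    out.tv_sec = (-((units_per_sec - 1) - (time_in_units + units_per_sec)) /
                  units_per_sec) -
                 1;
  }
  out.tv_nsec = (int32_t)((time_in_units - out.tv_sec * units_per_sec) *
                          NS_PER_SEC / units_per_sec);
  return out;
}

int main(void) {
  /* -1.5 seconds expressed in microseconds. */
  timespec_sketch t = from_sub_second(-1500000, US_PER_SEC);
  /* Prints: tv_sec=-2 tv_nsec=500000000 */
  printf("tv_sec=%" PRId64 " tv_nsec=%d\n", t.tv_sec, t.tv_nsec);
  return 0;
}
```
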
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index e5668be47f..fc9df76dc1 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -259,7 +259,8 @@ grpc_call *grpc_call_create(
call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
}
}
- call->send_deadline = send_deadline;
+ call->send_deadline =
+ gpr_convert_clock_type(send_deadline, GPR_CLOCK_MONOTONIC);
GRPC_CHANNEL_INTERNAL_REF(channel, "call");
/* initial refcount dropped by grpc_call_destroy */
grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call,
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index def6e5068b..2f108af48a 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -73,6 +73,7 @@ typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
typedef struct requested_call {
requested_call_type type;
+ size_t cq_idx;
void *tag;
grpc_server *server;
grpc_completion_queue *cq_bound_to_call;
@@ -206,11 +207,11 @@ struct grpc_server {
registered_method *registered_methods;
/** one request matcher for unregistered methods */
request_matcher unregistered_request_matcher;
- /** free list of available requested_calls indices */
- gpr_stack_lockfree *request_freelist;
+ /** free list of available requested_calls_per_cq indices */
+ gpr_stack_lockfree **request_freelist_per_cq;
/** requested call backing data */
- requested_call *requested_calls;
- size_t max_requested_calls;
+ requested_call **requested_calls_per_cq;
+ int max_requested_calls_per_cq;
gpr_atm shutdown_flag;
uint8_t shutdown_published;
@@ -357,7 +358,8 @@ static void request_matcher_kill_requests(grpc_exec_ctx *exec_ctx,
for (size_t i = 0; i < server->cq_count; i++) {
while ((request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[i])) !=
-1) {
- fail_call(exec_ctx, server, i, &server->requested_calls[request_id],
+ fail_call(exec_ctx, server, i,
+ &server->requested_calls_per_cq[i][request_id],
GRPC_ERROR_REF(error));
}
}
@@ -392,12 +394,16 @@ static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) {
}
for (i = 0; i < server->cq_count; i++) {
GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server");
+ if (server->started) {
+ gpr_stack_lockfree_destroy(server->request_freelist_per_cq[i]);
+ gpr_free(server->requested_calls_per_cq[i]);
+ }
}
- gpr_stack_lockfree_destroy(server->request_freelist);
+ gpr_free(server->request_freelist_per_cq);
+ gpr_free(server->requested_calls_per_cq);
gpr_free(server->cqs);
gpr_free(server->pollsets);
gpr_free(server->shutdown_tags);
- gpr_free(server->requested_calls);
gpr_free(server);
}
@@ -460,11 +466,13 @@ static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
requested_call *rc = req;
grpc_server *server = rc->server;
- if (rc >= server->requested_calls &&
- rc < server->requested_calls + server->max_requested_calls) {
- GPR_ASSERT(rc - server->requested_calls <= INT_MAX);
- gpr_stack_lockfree_push(server->request_freelist,
- (int)(rc - server->requested_calls));
+ if (rc >= server->requested_calls_per_cq[rc->cq_idx] &&
+ rc < server->requested_calls_per_cq[rc->cq_idx] +
+ server->max_requested_calls_per_cq) {
+ GPR_ASSERT(rc - server->requested_calls_per_cq[rc->cq_idx] <= INT_MAX);
+ gpr_stack_lockfree_push(
+ server->request_freelist_per_cq[rc->cq_idx],
+ (int)(rc - server->requested_calls_per_cq[rc->cq_idx]));
} else {
gpr_free(req);
}
@@ -540,7 +548,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
publish_call(exec_ctx, server, calld, cq_idx,
- &server->requested_calls[request_id]);
+ &server->requested_calls_per_cq[cq_idx][request_id]);
return; /* early out */
}
}
@@ -979,8 +987,6 @@ void grpc_server_register_non_listening_completion_queue(
}
grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
- size_t i;
-
GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
grpc_server *server = gpr_malloc(sizeof(grpc_server));
@@ -998,15 +1004,7 @@ grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
&server->root_channel_data;
/* TODO(ctiller): expose a channel_arg for this */
- server->max_requested_calls = 32768;
- server->request_freelist =
- gpr_stack_lockfree_create(server->max_requested_calls);
- for (i = 0; i < (size_t)server->max_requested_calls; i++) {
- gpr_stack_lockfree_push(server->request_freelist, (int)i);
- }
- server->requested_calls = gpr_malloc(server->max_requested_calls *
- sizeof(*server->requested_calls));
-
+ server->max_requested_calls_per_cq = 32768;
server->channel_args = grpc_channel_args_copy(args);
return server;
@@ -1066,16 +1064,28 @@ void grpc_server_start(grpc_server *server) {
server->started = true;
size_t pollset_count = 0;
server->pollsets = gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
+ server->request_freelist_per_cq =
+ gpr_malloc(sizeof(*server->request_freelist_per_cq) * server->cq_count);
+ server->requested_calls_per_cq =
+ gpr_malloc(sizeof(*server->requested_calls_per_cq) * server->cq_count);
for (i = 0; i < server->cq_count; i++) {
if (!grpc_cq_is_non_listening_server_cq(server->cqs[i])) {
server->pollsets[pollset_count++] = grpc_cq_pollset(server->cqs[i]);
}
+ server->request_freelist_per_cq[i] =
+ gpr_stack_lockfree_create((size_t)server->max_requested_calls_per_cq);
+ for (int j = 0; j < server->max_requested_calls_per_cq; j++) {
+ gpr_stack_lockfree_push(server->request_freelist_per_cq[i], j);
+ }
+ server->requested_calls_per_cq[i] =
+ gpr_malloc((size_t)server->max_requested_calls_per_cq *
+ sizeof(*server->requested_calls_per_cq[i]));
}
request_matcher_init(&server->unregistered_request_matcher,
- server->max_requested_calls, server);
+ (size_t)server->max_requested_calls_per_cq, server);
for (registered_method *rm = server->registered_methods; rm; rm = rm->next) {
- request_matcher_init(&rm->request_matcher, server->max_requested_calls,
- server);
+ request_matcher_init(&rm->request_matcher,
+ (size_t)server->max_requested_calls_per_cq, server);
}
for (l = server->listeners; l; l = l->next) {
@@ -1307,11 +1317,13 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_CREATE("Server Shutdown"));
return GRPC_CALL_OK;
}
- request_id = gpr_stack_lockfree_pop(server->request_freelist);
+ request_id = gpr_stack_lockfree_pop(server->request_freelist_per_cq[cq_idx]);
if (request_id == -1) {
/* out of request ids: just fail this one */
fail_call(exec_ctx, server, cq_idx, rc,
- GRPC_ERROR_CREATE("Server Shutdown"));
+ grpc_error_set_int(GRPC_ERROR_CREATE("Out of request ids"),
+ GRPC_ERROR_INT_LIMIT,
+ server->max_requested_calls_per_cq));
return GRPC_CALL_OK;
}
switch (rc->type) {
@@ -1322,7 +1334,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
rm = &rc->data.registered.registered_method->request_matcher;
break;
}
- server->requested_calls[request_id] = *rc;
+ server->requested_calls_per_cq[cq_idx][request_id] = *rc;
gpr_free(rc);
if (gpr_stack_lockfree_push(rm->requests_per_cq[cq_idx], request_id)) {
/* this was the first queued request: we need to lock and start
@@ -1346,7 +1358,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
publish_call(exec_ctx, server, calld, cq_idx,
- &server->requested_calls[request_id]);
+ &server->requested_calls_per_cq[cq_idx][request_id]);
}
gpr_mu_lock(&server->mu_call);
}
@@ -1382,6 +1394,7 @@ grpc_call_error grpc_server_request_call(
}
grpc_cq_begin_op(cq_for_notification, tag);
details->reserved = NULL;
+ rc->cq_idx = cq_idx;
rc->type = BATCH_CALL;
rc->server = server;
rc->tag = tag;
@@ -1430,6 +1443,7 @@ grpc_call_error grpc_server_request_registered_call(
goto done;
}
grpc_cq_begin_op(cq_for_notification, tag);
+ rc->cq_idx = cq_idx;
rc->type = REGISTERED_CALL;
rc->server = server;
rc->tag = tag;
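
The server.c change replaces the single requested_calls array and freelist with one of each per completion queue, recording cq_idx in every requested_call so that done_request_event can return a slot to the freelist it came from. The sketch below is a simplified, single-threaded model of that indexing pattern (plain array stacks stand in for gpr_stack_lockfree; all names are local to the sketch, not gRPC's).

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified model: one slot array plus one index freelist per completion
   queue. gRPC uses gpr_stack_lockfree for the freelists; a plain stack
   stands in for it here. */
#define CQ_COUNT 2
#define SLOTS_PER_CQ 4

typedef struct {
  size_t cq_idx; /* which per-CQ array this call lives in (mirrors rc->cq_idx) */
  void *tag;
} requested_call_sketch;

static requested_call_sketch calls[CQ_COUNT][SLOTS_PER_CQ];
static int freelist[CQ_COUNT][SLOTS_PER_CQ];
static int free_top[CQ_COUNT];

static void init(void) {
  for (int cq = 0; cq < CQ_COUNT; cq++) {
    for (int i = 0; i < SLOTS_PER_CQ; i++) freelist[cq][i] = i;
    free_top[cq] = SLOTS_PER_CQ;
  }
}

/* Analogue of queue_call_request: pop a slot index from this CQ's freelist. */
static int acquire(size_t cq_idx, void *tag) {
  if (free_top[cq_idx] == 0) return -1; /* out of request ids for this CQ */
  int id = freelist[cq_idx][--free_top[cq_idx]];
  calls[cq_idx][id].cq_idx = cq_idx;
  calls[cq_idx][id].tag = tag;
  return id;
}

/* Analogue of done_request_event: use the stored cq_idx to return the slot. */
static void release(requested_call_sketch *rc) {
  int id = (int)(rc - calls[rc->cq_idx]);
  freelist[rc->cq_idx][free_top[rc->cq_idx]++] = id;
}

int main(void) {
  init();
  int id = acquire(1, (void *)0x1);
  printf("cq 1 got slot %d\n", id);
  release(&calls[1][id]);
  return 0;
}
```
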
diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.c
index 53f3c43854..1942075054 100644
--- a/src/core/lib/surface/version.c
+++ b/src/core/lib/surface/version.c
@@ -36,4 +36,4 @@
#include <grpc/grpc.h>
-const char *grpc_version_string(void) { return "0.16.0-dev"; }
+const char *grpc_version_string(void) { return "1.1.0-dev"; }
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c
index 054f112127..68d05e3a85 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.c
@@ -179,6 +179,9 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
while ((w = tracker->watchers) != NULL) {
*w->current = tracker->current_state;
tracker->watchers = w->next;
+ if (grpc_connectivity_state_trace) {
+ gpr_log(GPR_DEBUG, "NOTIFY: %p", w->notify);
+ }
grpc_exec_ctx_sched(exec_ctx, w->notify,
GRPC_ERROR_REF(tracker->current_error), NULL);
gpr_free(w);
diff --git a/src/cpp/server/server.cc b/src/cpp/server/server.cc
index fb4c68ebe4..af04fd4ca6 100644
--- a/src/cpp/server/server.cc
+++ b/src/cpp/server/server.cc
@@ -281,6 +281,7 @@ Server::Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned,
: max_message_size_(max_message_size),
started_(false),
shutdown_(false),
+ shutdown_notified_(false),
num_running_cb_(0),
sync_methods_(new std::list<SyncRequest>),
has_generic_service_(false),
@@ -462,13 +463,16 @@ void Server::ShutdownInternal(gpr_timespec deadline) {
while (num_running_cb_ != 0) {
callback_cv_.wait(lock);
}
+
+ shutdown_notified_ = true;
+ shutdown_cv_.notify_all();
}
}
void Server::Wait() {
grpc::unique_lock<grpc::mutex> lock(mu_);
- while (num_running_cb_ != 0) {
- callback_cv_.wait(lock);
+ while (started_ && !shutdown_notified_) {
+ shutdown_cv_.wait(lock);
}
}
diff --git a/src/csharp/Grpc.Auth/project.json b/src/csharp/Grpc.Auth/project.json
index 72c258a91a..08429f1d46 100644
--- a/src/csharp/Grpc.Auth/project.json
+++ b/src/csharp/Grpc.Auth/project.json
@@ -1,5 +1,5 @@
{
- "version": "0.16.0-dev",
+ "version": "1.1.0-dev",
"title": "gRPC C# Auth",
"authors": [ "Google Inc." ],
"copyright": "Copyright 2015, Google Inc.",
@@ -22,7 +22,7 @@
}
},
"dependencies": {
- "Grpc.Core": "0.16.0-dev",
+ "Grpc.Core": "1.1.0-dev",
"Google.Apis.Auth": "1.11.1"
},
"frameworks": {
diff --git a/src/csharp/Grpc.Core/VersionInfo.cs b/src/csharp/Grpc.Core/VersionInfo.cs
index cb20967680..553aeec58a 100644
--- a/src/csharp/Grpc.Core/VersionInfo.cs
+++ b/src/csharp/Grpc.Core/VersionInfo.cs
@@ -48,11 +48,11 @@ namespace Grpc.Core
/// <summary>
/// Current <c>AssemblyFileVersion</c> of gRPC C# assemblies
/// </summary>
- public const string CurrentAssemblyFileVersion = "0.16.0.0";
+ public const string CurrentAssemblyFileVersion = "1.1.0.0";
/// <summary>
/// Current version of gRPC C#
/// </summary>
- public const string CurrentVersion = "0.16.0-dev";
+ public const string CurrentVersion = "1.1.0-dev";
}
}
diff --git a/src/csharp/Grpc.Core/project.json b/src/csharp/Grpc.Core/project.json
index 201e548801..4545d26aa5 100644
--- a/src/csharp/Grpc.Core/project.json
+++ b/src/csharp/Grpc.Core/project.json
@@ -1,5 +1,5 @@
{
- "version": "0.16.0-dev",
+ "version": "1.1.0-dev",
"title": "gRPC C# Core",
"authors": [ "Google Inc." ],
"copyright": "Copyright 2015, Google Inc.",
diff --git a/src/csharp/Grpc.HealthCheck/project.json b/src/csharp/Grpc.HealthCheck/project.json
index d9daef720f..0e03e89d6a 100644
--- a/src/csharp/Grpc.HealthCheck/project.json
+++ b/src/csharp/Grpc.HealthCheck/project.json
@@ -1,5 +1,5 @@
{
- "version": "0.16.0-dev",
+ "version": "1.1.0-dev",
"title": "gRPC C# Healthchecking",
"authors": [ "Google Inc." ],
"copyright": "Copyright 2015, Google Inc.",
@@ -22,7 +22,7 @@
}
},
"dependencies": {
- "Grpc.Core": "0.16.0-dev",
+ "Grpc.Core": "1.1.0-dev",
"Google.Protobuf": "3.0.0-beta3"
},
"frameworks": {
diff --git a/src/csharp/README.md b/src/csharp/README.md
index 86394135c8..18d5945a8a 100644
--- a/src/csharp/README.md
+++ b/src/csharp/README.md
@@ -23,9 +23,9 @@ HOW TO USE
- Open Visual Studio / MonoDevelop / Xamarin Studio and start a new project/solution.
-- Add NuGet package `Grpc` as a dependency (Project options -> Manage NuGet Packages).
+- Add the [Grpc](https://www.nuget.org/packages/Grpc/) NuGet package as a dependency (Project options -> Manage NuGet Packages).
-- To be able to generate code from Protocol Buffer (`.proto`) file definitions, add NuGet package `Grpc.Tools` that contains Protocol Buffers compiler (_protoc_) and the gRPC _protoc_ plugin.
+- To be able to generate code from Protocol Buffer (`.proto`) file definitions, add the [Grpc.Tools](https://www.nuget.org/packages/Grpc.Tools/) NuGet package that contains the Protocol Buffers compiler (_protoc_) and the gRPC _protoc_ plugin.
BUILD FROM SOURCE
-----------------
diff --git a/src/csharp/build_packages.bat b/src/csharp/build_packages.bat
index 272b30f385..f05c0241b6 100644
--- a/src/csharp/build_packages.bat
+++ b/src/csharp/build_packages.bat
@@ -30,7 +30,7 @@
@rem Builds gRPC NuGet packages
@rem Current package versions
-set VERSION=0.16.0-dev
+set VERSION=1.1.0-dev
set PROTOBUF_VERSION=3.0.0-beta3
@rem Packages that depend on prerelease packages (like Google.Protobuf) need to have prerelease suffix as well.
diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c
index c670ea65c7..3d0947c03d 100644
--- a/src/csharp/ext/grpc_csharp_ext.c
+++ b/src/csharp/ext/grpc_csharp_ext.c
@@ -253,8 +253,9 @@ GPR_EXPORT intptr_t GPR_CALLTYPE grpcsharp_batch_context_recv_message_length(
if (!ctx->recv_message) {
return -1;
}
- /* TODO(issue:#7206): check return value of grpc_byte_buffer_reader_init. */
- grpc_byte_buffer_reader_init(&reader, ctx->recv_message);
+
+ GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, ctx->recv_message));
+
return (intptr_t)grpc_byte_buffer_length(reader.buffer_out);
}
@@ -268,8 +269,7 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_batch_context_recv_message_to_buffer(
gpr_slice slice;
size_t offset = 0;
- /* TODO(issue:#7206): check return value of grpc_byte_buffer_reader_init. */
- grpc_byte_buffer_reader_init(&reader, ctx->recv_message);
+ GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, ctx->recv_message));
while (grpc_byte_buffer_reader_next(&reader, &slice)) {
size_t len = GPR_SLICE_LENGTH(slice);
diff --git a/src/node/health_check/package.json b/src/node/health_check/package.json
index ad65b31917..67f5301df7 100644
--- a/src/node/health_check/package.json
+++ b/src/node/health_check/package.json
@@ -1,6 +1,6 @@
{
"name": "grpc-health-check",
- "version": "0.16.0-dev",
+ "version": "1.1.0-dev",
"author": "Google Inc.",
"description": "Health check service for use with gRPC",
"repository": {
diff --git a/src/node/tools/package.json b/src/node/tools/package.json
index 7c256d7ba0..e5513d7879 100644
--- a/src/node/tools/package.json
+++ b/src/node/tools/package.json
@@ -1,6 +1,6 @@
{
"name": "grpc-tools",
- "version": "0.16.0-dev",
+ "version": "1.1.0-dev",
"author": "Google Inc.",
"description": "Tools for developing with gRPC on Node.js",
"homepage": "http://www.grpc.io/",
diff --git a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
new file mode 100644
index 0000000000..72cadb9319
--- /dev/null
+++ b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
@@ -0,0 +1,122 @@
+# CocoaPods podspec for the gRPC Proto Compiler Plugin
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Pod::Spec.new do |s|
+ # This pod is only a utility that will be used by other pods _at install time_ (not at compile
+ # time). Other pods can access it in their `prepare_command` script, under <pods_root>/<pod name>.
+ # Because CocoaPods installs pods in alphabetical order, beginning this pod's name with an
+ # exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed
+ # before them.
+ s.name = '!ProtoCompiler-gRPCPlugin'
+ v = '0.14.0'
+ s.version = v
+ s.summary = 'The gRPC ProtoC plugin generates Objective-C files from .proto services.'
+ s.description = <<-DESC
+ This podspec only downloads the gRPC protoc plugin so that local pods generating protos can use
+ it in their invocation of protoc, as part of their prepare_command.
+ The generated code will have a dependency on the gRPC Objective-C Proto runtime of the same
+ version. The runtime can be obtained as the "gRPC-ProtoRPC" pod.
+ DESC
+ s.homepage = 'http://www.grpc.io'
+ s.license = {
+ :type => 'New BSD',
+ :text => <<-LICENSE
+ Copyright 2015, Google Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ LICENSE
+ }
+ s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' }
+
+ repo = 'grpc/grpc'
+ release = 'release-0_14_1'
+ file = "grpc_objective_c_plugin-#{v}-macos-x86_64.zip"
+ s.source = {
+ :http => "https://github.com/#{repo}/releases/download/#{release}/#{file}",
+ # TODO(jcanizales): Add sha1 or sha256
+ # :sha1 => '??',
+ }
+
+ repo_root = '../..'
+ plugin = 'grpc_objective_c_plugin'
+
+ s.preserve_paths = plugin
+
+ # Restrict the protoc version to the one supported by this plugin.
+ s.dependency '!ProtoCompiler', '3.0.0-beta-3.1'
+ # For the Protobuf dependency not to complain:
+ s.ios.deployment_target = '7.1'
+ s.osx.deployment_target = '10.9'
+ # Restrict the gRPC runtime version to the one supported by this plugin.
+ s.dependency 'gRPC-ProtoRPC', v
+
+ # This is only for local development of the plugin: If the Podfile brings this pod from a local
+ # directory using `:path`, CocoaPods won't download the zip file and so the plugin won't be
+ # present in this pod's directory. We use that knowledge to check for the existence of the file
+ # and, if absent, compile the plugin from the local sources.
+ s.prepare_command = <<-CMD
+ if [ ! -f #{plugin} ]; then
+ cd #{repo_root}
+ # This will build the plugin and put it in #{repo_root}/bins/opt.
+ #
+ # TODO(jcanizales): I reckon make will try to use locally-installed libprotoc (headers and
+ # library binary) if found, which _we do not want_. Find a way for this to always use the
+ # sources in the repo.
+ make #{plugin}
+ cd -
+ fi
+ CMD
+end
diff --git a/src/objective-c/!ProtoCompiler.podspec b/src/objective-c/!ProtoCompiler.podspec
new file mode 100644
index 0000000000..5e59b25aee
--- /dev/null
+++ b/src/objective-c/!ProtoCompiler.podspec
@@ -0,0 +1,136 @@
+# Proto Compiler CocoaPods podspec
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Pod::Spec.new do |s|
+ # This pod is only a utility that will be used by other pods _at install time_ (not at compile
+ # time). Other pods can access it in their `prepare_command` script, under <pods_root>/<pod name>.
+ # Because CocoaPods installs pods in alphabetical order, beginning this pod's name with an
+ # exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed
+ # before them.
+ s.name = '!ProtoCompiler'
+ v = '3.0.0-beta-3.1'
+ s.version = v
+ s.summary = 'The Protobuf Compiler (protoc) generates Objective-C files from .proto files'
+ s.description = <<-DESC
+ This podspec only downloads protoc so that local pods generating protos can execute it as part
+ of their prepare_command.
+ The generated code will have a dependency on the Protobuf Objective-C runtime of the same
+ version. The runtime can be obtained as the "Protobuf" pod.
+ DESC
+ s.homepage = 'https://github.com/google/protobuf'
+ s.license = {
+ :type => 'New BSD',
+ :text => <<-LICENSE
+ This license applies to all parts of Protocol Buffers except the following:
+
+ - Atomicops support for generic gcc, located in
+ src/google/protobuf/stubs/atomicops_internals_generic_gcc.h.
+ This file is copyrighted by Red Hat Inc.
+
+ - Atomicops support for AIX/POWER, located in
+ src/google/protobuf/stubs/atomicops_internals_power.h.
+ This file is copyrighted by Bloomberg Finance LP.
+
+ Copyright 2014, Google Inc. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ Code generated by the Protocol Buffer compiler is owned by the owner
+ of the input file used when generating it. This code is not
+ standalone and requires a support library to be linked with it. This
+ support library is itself covered by the above license.
+ LICENSE
+ }
+ # "The name and email addresses of the library maintainers, not the Podspec maintainer."
+ s.authors = { 'The Protocol Buffers contributors' => 'protobuf@googlegroups.com' }
+
+ repo = 'google/protobuf'
+ release = "v#{v}"
+ file = "protoc-#{v}-osx-x86_64.zip"
+ s.source = {
+ :http => "https://github.com/#{repo}/releases/download/#{release}/#{file}",
+ # TODO(jcanizales): Add sha1 or sha256
+ # :sha1 => '??',
+ }
+
+ s.preserve_paths = 'protoc',
+ 'google/**/*.proto' # Well-known protobuf types
+
+ # Restrict the protobuf runtime version to the one supported by this version of protoc.
+ s.dependency 'Protobuf', v
+ # For the Protobuf dependency not to complain:
+ s.ios.deployment_target = '7.1'
+ s.osx.deployment_target = '10.9'
+
+ # This is only for local development of protoc: If the Podfile brings this pod from a local
+ # directory using `:path`, CocoaPods won't download the zip file and so the compiler won't be
+ # present in this pod's directory. We use that knowledge to check for the existence of the file
+ # and, if absent, build it from the local sources.
+ repo_root = '../..'
+ plugin = 'grpc_objective_c_plugin'
+ s.prepare_command = <<-CMD
+ if [ ! -f protoc ]; then
+ cd #{repo_root}
+ # This will build protoc from the Protobuf submodule of gRPC, and put it in
+ # #{repo_root}/bins/opt/protobuf.
+ #
+ # TODO(jcanizales): Make won't build protoc from sources if one's locally installed, which
+ # _we do not want_. Find a way for this to always build from source.
+ make #{plugin}
+ cd -
+ fi
+ CMD
+
+end
diff --git a/src/objective-c/BoringSSL.podspec b/src/objective-c/BoringSSL.podspec
index 26a0451f7d..42b4434d0d 100644
--- a/src/objective-c/BoringSSL.podspec
+++ b/src/objective-c/BoringSSL.podspec
@@ -109,8 +109,6 @@ Pod::Spec.new do |s|
s.subspec 'Interface' do |ss|
ss.header_mappings_dir = 'include/openssl'
ss.source_files = 'include/openssl/*.h'
- # Doesn't compile correctly; but doesn't seem to be needed:
- ss.exclude_files = 'include/openssl/arm_arch.h'
end
s.subspec 'Implementation' do |ss|
ss.header_mappings_dir = '.'
@@ -147,6 +145,11 @@ Pod::Spec.new do |s|
#include "ssl.h"
#include "crypto.h"
#include "aes.h"
+ /* The following macros are defined by base.h. The latter is the first file included by the
+ other headers. */
+ #if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
+ # include "arm_arch.h"
+ #endif
#include "asn1.h"
#include "asn1_mac.h"
#include "asn1t.h"
diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m
index da9473f9a2..05a1d10f6d 100644
--- a/src/objective-c/GRPCClient/GRPCCall.m
+++ b/src/objective-c/GRPCClient/GRPCCall.m
@@ -208,13 +208,9 @@ NSString * const kGRPCTrailersKey = @"io.grpc.TrailersKey";
// don't want to throw, because the app shouldn't crash for a behavior
// that's on the hands of any server to have. Instead we finish and ask
// the server to cancel.
- //
- // TODO(jcanizales): No canonical code is appropriate for this situation
- // (because it's just a client problem). Use another domain and an
- // appropriately-documented code.
[weakSelf finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
- code:GRPCErrorCodeInternal
- userInfo:nil]];
+ code:GRPCErrorCodeResourceExhausted
+ userInfo:@{NSLocalizedDescriptionKey: @"Client does not have enough memory to hold the server response."}]];
[weakSelf cancelCall];
return;
}
diff --git a/src/objective-c/README.md b/src/objective-c/README.md
index 736c324ca9..a0ca5f448a 100644
--- a/src/objective-c/README.md
+++ b/src/objective-c/README.md
@@ -1,12 +1,12 @@
[![Cocoapods](https://img.shields.io/cocoapods/v/gRPC.svg)](https://cocoapods.org/pods/gRPC)
# gRPC for Objective-C
-- [Install protoc with the gRPC plugin](#install)
- [Write your API declaration in proto format](#write-protos)
- [Integrate a proto library in your project](#cocoapods)
- [Use the generated library in your code](#use)
- [Use gRPC without Protobuf](#no-proto)
-- [Alternative installation methods](#alternatives)
+- [Alternatives to the steps above](#alternatives)
+ - [Install protoc with the gRPC plugin](#install)
- [Install protoc and the gRPC plugin without using Homebrew](#no-homebrew)
- [Integrate the generated gRPC library without using Cocoapods](#no-cocoapods)
@@ -15,18 +15,6 @@ usage and adds some interoperability guarantees. Here we use [Protocol Buffers][
plugin for the Protobuf Compiler (_protoc_) to generate client libraries to communicate with gRPC
services.
-<a name="install"></a>
-## Install protoc with the gRPC plugin
-
-On Mac OS X, install [homebrew][].
-
-Run the following command to install _protoc_ and the gRPC _protoc_ plugin:
-```sh
-$ curl -fsSL https://goo.gl/getgrpc | bash -
-```
-This will download and run the [gRPC install script][]. After the command completes, you're ready to
-proceed.
-
<a name="write-protos"></a>
## Write your API declaration in proto format
@@ -40,7 +28,8 @@ Install [Cocoapods](https://cocoapods.org/#install).
You need to create a Podspec file for your proto library. You may simply copy the following example
to the directory where your `.proto` files are located, updating the name, version and license as
-necessary:
+necessary. You also need to set the `pods_root` variable to the correct value, depending on where
+you place this podspec relative to your Podfile.
```ruby
Pod::Spec.new do |s|
@@ -55,16 +44,44 @@ Pod::Spec.new do |s|
s.ios.deployment_target = '7.1'
s.osx.deployment_target = '10.9'
+ # Base directory where the .proto files are.
+ src = '.'
+
+ # We'll use protoc with the gRPC plugin.
+ s.dependency '!ProtoCompiler-gRPCPlugin', '~> 0.14'
+
+ # Pods directory corresponding to this app's Podfile, relative to the location of this podspec.
+ pods_root = '<path to your Podfile>/Pods'
+
+ # Path where Cocoapods downloads protoc and the gRPC plugin.
+ protoc_dir = "#{pods_root}/!ProtoCompiler"
+ protoc = "#{protoc_dir}/protoc"
+ plugin = "#{pods_root}/!ProtoCompiler-gRPCPlugin/grpc_objective_c_plugin"
+
+ # Directory where you want the generated files to be placed. This is an example.
+ dir = "#{pods_root}/#{s.name}"
+
# Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
# You can run this command manually if you later change your protos and need to regenerate.
- s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto"
+ # Alternatively, you can advance the version of this podspec and run `pod update`.
+ s.prepare_command = <<-CMD
+ mkdir -p #{dir}
+ #{protoc} \
+ --plugin=protoc-gen-grpc=#{plugin} \
+ --objc_out=#{dir} \
+ --grpc_out=#{dir} \
+ -I #{src} \
+ -I #{protoc_dir} \
+ #{src}/*.proto
+ CMD
# The --objc_out plugin generates a pair of .pbobjc.h/.pbobjc.m files for each .proto file.
- s.subspec "Messages" do |ms|
- ms.source_files = "*.pbobjc.{h,m}"
- ms.header_mappings_dir = "."
+ s.subspec 'Messages' do |ms|
+ ms.source_files = "#{dir}/*.pbobjc.{h,m}"
+ ms.header_mappings_dir = dir
ms.requires_arc = false
- ms.dependency "Protobuf", "~> 3.0.0-beta-2"
+ # The generated files depend on the protobuf runtime.
+ ms.dependency 'Protobuf'
# This is needed by all pods that depend on Protobuf:
ms.pod_target_xcconfig = {
'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
@@ -73,11 +90,12 @@ Pod::Spec.new do |s|
# The --objcgrpc_out plugin generates a pair of .pbrpc.h/.pbrpc.m files for each .proto file with
# a service defined.
- s.subspec "Services" do |ss|
- ss.source_files = "*.pbrpc.{h,m}"
- ss.header_mappings_dir = "."
+ s.subspec 'Services' do |ss|
+ ss.source_files = "#{dir}/*.pbrpc.{h,m}"
+ ss.header_mappings_dir = dir
ss.requires_arc = true
- ss.dependency "gRPC-ProtoRPC", "~> 0.14"
+ # The generated files depend on the gRPC runtime, and on the files generated by `--objc_out`.
+ ss.dependency 'gRPC-ProtoRPC'
ss.dependency "#{s.name}/Messages"
end
end
@@ -89,11 +107,14 @@ Note: If your proto files are in a directory hierarchy, you might want to adjust
the sample Podspec above. For example, you could use:
```ruby
- s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto **/*.proto"
+ s.prepare_command = <<-CMD
+ ...
+ #{src}/*.proto #{src}/**/*.proto
+ CMD
...
- ms.source_files = "*.pbobjc.{h,m}", "**/*.pbobjc.{h,m}"
+ ms.source_files = "#{dir}/*.pbobjc.{h,m}", "#{dir}/**/*.pbobjc.{h,m}"
...
- ss.source_files = "*.pbrpc.{h,m}", "**/*.pbrpc.{h,m}"
+ ss.source_files = "#{dir}/*.pbrpc.{h,m}", "#{dir}/**/*.pbrpc.{h,m}"
```
Once your library has a Podspec, Cocoapods can install it into any XCode project. For that, go into
@@ -121,19 +142,33 @@ pod install
<a name="use"></a>
## Use the generated library in your code
-Please check this [sample app][] for examples of how to use a generated gRPC library.
+Please check the [example apps][] for examples of how to use a generated gRPC library.
<a name="no-proto"></a>
## Use gRPC without Protobuf
-The [sample app][] has an example of how to use the generic gRPC Objective-C client without
-generated files.
+This [tests file](https://github.com/grpc/grpc/tree/master/src/objective-c/tests/GRPCClientTests.m)
+shows how to use the generic gRPC Objective-C client without generated protobuf files.
<a name="alternatives"></a>
-## Alternative installation methods
+## Alternatives to the steps above
+
+<a name="install"></a>
+### Install _protoc_ with the gRPC plugin
+
+Although it's not recommended (because it can lead to hard-to-solve version conflicts), it is
+sometimes more convenient to install _protoc_ and the gRPC plugin on your development machine,
+instead of letting Cocoapods download the appropriate versions for you. To do so, on Mac OS X,
+install [homebrew][].
+
+Then run the following command to install _protoc_ and the gRPC _protoc_ plugin:
+```sh
+$ curl -fsSL https://goo.gl/getgrpc | bash -
+```
+This will download and run the [gRPC install script][].
<a name="no-homebrew"></a>
-### Install protoc and the gRPC plugin without using Homebrew
+### Install _protoc_ and the gRPC plugin without using Homebrew
First install v3 of the Protocol Buffers compiler (_protoc_), by cloning
[its Git repository](https://github.com/google/protobuf) and following these
@@ -145,15 +180,15 @@ cloned.
Compile the gRPC plugins for _protoc_:
```sh
-make plugins
+make grpc_objective_c_plugin
```
Create a symbolic link to the compiled plugin binary somewhere in your `$PATH`:
```sh
ln -s `pwd`/bins/opt/grpc_objective_c_plugin /usr/local/bin/protoc-gen-objcgrpc
```
-(Notice that the name of the created link must begin with "protoc-gen-" for _protoc_ to recognize it
-as a plugin).
+(Notice that the name of the created link must begin with "`protoc-gen-`" for _protoc_ to recognize
+it as a plugin).
If you don't want to create the symbolic link, you can alternatively copy the binary (with the
appropriate name). Or you might prefer instead to specify the plugin's path as a flag when invoking
@@ -178,5 +213,5 @@ Objective-C Protobuf runtime library.
[Protocol Buffers]:https://developers.google.com/protocol-buffers/
[homebrew]:http://brew.sh
[gRPC install script]:https://raw.githubusercontent.com/grpc/homebrew-grpc/master/scripts/install
-[example Podfile]:https://github.com/grpc/grpc/blob/master/src/objective-c/examples/Sample/Podfile
-[sample app]: https://github.com/grpc/grpc/tree/master/src/objective-c/examples/Sample
+[example Podfile]:https://github.com/grpc/grpc/blob/master/examples/objective-c/helloworld/Podfile
+[example apps]: https://github.com/grpc/grpc/tree/master/examples/objective-c
diff --git a/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec b/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec
index e3b50ddea5..6e783fb5ad 100644
--- a/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec
+++ b/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec
@@ -11,15 +11,30 @@ Pod::Spec.new do |s|
s.osx.deployment_target = '10.9'
# Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
+ s.dependency "!ProtoCompiler-gRPCPlugin", "~> 0.14"
+
+ repo_root = '../../../..'
+ bin_dir = "#{repo_root}/bins/$CONFIG"
+
+ protoc = "#{bin_dir}/protobuf/protoc"
+ well_known_types_dir = "#{repo_root}/third_party/protobuf/src"
+ plugin = "#{bin_dir}/grpc_objective_c_plugin"
+
s.prepare_command = <<-CMD
- protoc --objc_out=. --objcgrpc_out=. *.proto
+ #{protoc} \
+ --plugin=protoc-gen-grpc=#{plugin} \
+ --objc_out=. \
+ --grpc_out=. \
+ -I . \
+ -I #{well_known_types_dir} \
+ *.proto
CMD
s.subspec 'Messages' do |ms|
ms.source_files = '*.pbobjc.{h,m}'
ms.header_mappings_dir = '.'
ms.requires_arc = false
- ms.dependency 'Protobuf', '~> 3.0.0-beta-3.1'
+ ms.dependency 'Protobuf'
# This is needed by all pods that depend on Protobuf:
ms.pod_target_xcconfig = {
'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
@@ -30,7 +45,7 @@ Pod::Spec.new do |s|
ss.source_files = '*.pbrpc.{h,m}'
ss.header_mappings_dir = '.'
ss.requires_arc = true
- ss.dependency 'gRPC-ProtoRPC', '~> 0.14'
+ ss.dependency 'gRPC-ProtoRPC'
ss.dependency "#{s.name}/Messages"
end
end
diff --git a/src/objective-c/examples/Sample/Podfile b/src/objective-c/examples/Sample/Podfile
index 80ab2c320d..8740b2f963 100644
--- a/src/objective-c/examples/Sample/Podfile
+++ b/src/objective-c/examples/Sample/Podfile
@@ -12,6 +12,9 @@ target 'Sample' do
# Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following
# lines in your application.
+ pod '!ProtoCompiler', :path => "#{GRPC_LOCAL_SRC}/src/objective-c"
+ pod '!ProtoCompiler-gRPCPlugin', :path => "#{GRPC_LOCAL_SRC}/src/objective-c"
+
pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf"
pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c"
diff --git a/src/objective-c/examples/SwiftSample/Podfile b/src/objective-c/examples/SwiftSample/Podfile
index b675fd29ef..2f783340f5 100644
--- a/src/objective-c/examples/SwiftSample/Podfile
+++ b/src/objective-c/examples/SwiftSample/Podfile
@@ -12,6 +12,9 @@ target 'SwiftSample' do
# Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following
# lines in your application.
+ pod '!ProtoCompiler', :path => "#{GRPC_LOCAL_SRC}/src/objective-c"
+ pod '!ProtoCompiler-gRPCPlugin', :path => "#{GRPC_LOCAL_SRC}/src/objective-c"
+
pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf"
pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c"
diff --git a/src/objective-c/tests/Podfile b/src/objective-c/tests/Podfile
index 30a34260d4..17b9740d52 100644
--- a/src/objective-c/tests/Podfile
+++ b/src/objective-c/tests/Podfile
@@ -17,12 +17,18 @@ GRPC_LOCAL_SRC = '../../..'
).each do |target_name|
target target_name do
pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf", :inhibit_warnings => true
- pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c", :inhibit_warnings => true
+
+ pod '!ProtoCompiler', :path => "#{GRPC_LOCAL_SRC}/src/objective-c"
+ pod '!ProtoCompiler-gRPCPlugin', :path => "#{GRPC_LOCAL_SRC}/src/objective-c"
+
+ pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c", :inhibit_warnings => true
pod 'CronetFramework', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c"
- pod 'gRPC', :path => GRPC_LOCAL_SRC
- pod 'gRPC-Core', :path => GRPC_LOCAL_SRC
+
+ pod 'gRPC', :path => GRPC_LOCAL_SRC
+ pod 'gRPC-Core', :path => GRPC_LOCAL_SRC
pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC
pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC
+
pod 'RemoteTest', :path => "RemoteTestClient"
end
end
@@ -65,7 +71,7 @@ post_install do |installer|
end
if target.name == 'gRPC-Core'
target.build_configurations.each do |config|
- # TODO(zyc) Remove this setting after the issue is resolved
+ # TODO(zyc): Remove this setting after the issue is resolved
# GPR_UNREACHABLE_CODE causes "Control may reach end of non-void
# function" warning
config.build_settings['GCC_WARN_ABOUT_RETURN_TYPE'] = 'NO'
diff --git a/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec b/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec
index 25c9c7f841..7d84a5ae4d 100644
--- a/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec
+++ b/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec
@@ -11,20 +11,30 @@ Pod::Spec.new do |s|
s.osx.deployment_target = '10.9'
# Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
+ s.dependency "!ProtoCompiler-gRPCPlugin", "~> 0.14"
+
+ repo_root = '../../../..'
+ bin_dir = "#{repo_root}/bins/$CONFIG"
+
+ protoc = "#{bin_dir}/protobuf/protoc"
+ well_known_types_dir = "#{repo_root}/third_party/protobuf/src"
+ plugin = "#{bin_dir}/grpc_objective_c_plugin"
+
s.prepare_command = <<-CMD
- BINDIR=../../../../bins/$CONFIG
- PROTOC=$BINDIR/protobuf/protoc
- PLUGIN=$BINDIR/grpc_objective_c_plugin
- # we use this path to locate well-known proto files
- PROTO_SRC=../../../../third_party/protobuf/src
- $PROTOC --plugin=protoc-gen-grpc=$PLUGIN --objc_out=. --grpc_out=. *.proto -I $PROTO_SRC -I .
+ #{protoc} \
+ --plugin=protoc-gen-grpc=#{plugin} \
+ --objc_out=. \
+ --grpc_out=. \
+ -I . \
+ -I #{well_known_types_dir} \
+ *.proto
CMD
s.subspec "Messages" do |ms|
ms.source_files = "*.pbobjc.{h,m}"
ms.header_mappings_dir = "."
ms.requires_arc = false
- ms.dependency "Protobuf", "~> 3.0.0-beta-3.1"
+ ms.dependency "Protobuf"
# This is needed by all pods that depend on Protobuf:
ms.pod_target_xcconfig = {
'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
@@ -35,7 +45,7 @@ Pod::Spec.new do |s|
ss.source_files = "*.pbrpc.{h,m}"
ss.header_mappings_dir = "."
ss.requires_arc = true
- ss.dependency "gRPC-ProtoRPC", "~> 0.14"
+ ss.dependency "gRPC-ProtoRPC"
ss.dependency "#{s.name}/Messages"
end
end
diff --git a/src/php/README.md b/src/php/README.md
index 6cc1ba4d46..8abedc40a3 100644
--- a/src/php/README.md
+++ b/src/php/README.md
@@ -5,7 +5,7 @@ This directory contains source code for PHP implementation of gRPC layered on sh
#Status
-Beta
+GA
## Environment
@@ -43,7 +43,7 @@ $ sudo mv phpunit-old.phar /usr/bin/phpunit
Install the gRPC PHP extension
```sh
-sudo pecl install grpc-beta
+sudo pecl install grpc
```
This will compile and install the gRPC PHP extension into the standard PHP extension directory. You should be able to run the [unit tests](#unit-tests), with the PHP extension installed.
@@ -75,7 +75,7 @@ $ sudo make install
Install the gRPC PHP extension from PECL
```sh
-$ sudo pecl install grpc-beta
+$ sudo pecl install grpc
```
Or, compile from source
@@ -148,7 +148,7 @@ Alternatively, you can download `protoc` binaries from [the protocol buffers Git
You need to install `protoc-gen-php` to generate stub class `.php` files from service definition `.proto` files.
```sh
-$ cd grpc/src/php/vendor/datto/protobuf-php # if you had run `composer install` in the previous step
+$ cd grpc/src/php/vendor/stanley-cheung/protobuf-php # if you had run `composer install` in the previous step
OR
diff --git a/src/php/composer.json b/src/php/composer.json
index 2ad73223c6..23bfcedbe6 100644
--- a/src/php/composer.json
+++ b/src/php/composer.json
@@ -5,16 +5,11 @@
"keywords": ["rpc"],
"homepage": "http://grpc.io",
"license": "BSD-3-Clause",
- "repositories": [
- {
- "type": "vcs",
- "url": "https://github.com/stanley-cheung/Protobuf-PHP"
- }
- ],
+ "version": "1.0.0",
"require": {
"php": ">=5.5.0",
- "datto/protobuf-php": "dev-master",
- "google/auth": "v0.7"
+ "stanley-cheung/protobuf-php": "dev-master",
+ "google/auth": "v0.9"
},
"autoload": {
"psr-4": {
diff --git a/src/proto/grpc/testing/control.proto b/src/proto/grpc/testing/control.proto
index 20496a8116..ece6910815 100644
--- a/src/proto/grpc/testing/control.proto
+++ b/src/proto/grpc/testing/control.proto
@@ -229,4 +229,7 @@ message ScenarioResult {
repeated int32 server_cores = 5;
// An after-the-fact computed summary
ScenarioResultSummary summary = 6;
+ // Information on success or failure of each worker
+ repeated bool client_success = 7;
+ repeated bool server_success = 8;
}
diff --git a/src/python/grpcio/_unixccompiler_patch.py b/src/python/grpcio/_unixccompiler_patch.py
index 9a697989b3..0ce5d63e98 100644
--- a/src/python/grpcio/_unixccompiler_patch.py
+++ b/src/python/grpcio/_unixccompiler_patch.py
@@ -38,84 +38,36 @@ import shutil
import sys
import tempfile
+def _unix_commandfile_spawn(self, command):
+ """Wrapper around distutils.util.spawn that attempts to use command files.
-def _unix_piecemeal_link(
- self, target_desc, objects, output_filename, output_dir=None,
- libraries=None, library_dirs=None, runtime_library_dirs=None,
- export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None,
- build_temp=None, target_lang=None):
- """`link` externalized method taken almost verbatim from UnixCCompiler.
+ Meant to replace the CCompiler method `spawn` on UnixCCompiler and its
+ derivatives (e.g. the MinGW32 compiler).
- Modifies the link command for unix-like compilers by using a command file so
- that long command line argument strings don't break the command shell's
- ARG_MAX character limit.
+ Some commands like `gcc` (and friends like `clang`) support command files to
+ work around shell command length limits.
"""
- objects, output_dir = self._fix_object_args(objects, output_dir)
- libraries, library_dirs, runtime_library_dirs = self._fix_lib_args(
- libraries, library_dirs, runtime_library_dirs)
- # filter out standard library paths, which are not explicitely needed
- # for linking
- library_dirs = [dir for dir in library_dirs
- if not dir in ('/lib', '/lib64', '/usr/lib', '/usr/lib64')]
- runtime_library_dirs = [dir for dir in runtime_library_dirs
- if not dir in ('/lib', '/lib64', '/usr/lib', '/usr/lib64')]
- lib_opts = ccompiler.gen_lib_options(self, library_dirs, runtime_library_dirs,
- libraries)
- if (not (isinstance(output_dir, str) or isinstance(output_dir, bytes))
- and output_dir is not None):
- raise TypeError("'output_dir' must be a string or None")
- if output_dir is not None:
- output_filename = os.path.join(output_dir, output_filename)
-
- if self._need_link(objects, output_filename):
- ld_args = (objects + self.objects +
- lib_opts + ['-o', output_filename])
- if debug:
- ld_args[:0] = ['-g']
- if extra_preargs:
- ld_args[:0] = extra_preargs
- if extra_postargs:
- ld_args.extend(extra_postargs)
- self.mkpath(os.path.dirname(output_filename))
- try:
- if target_desc == ccompiler.CCompiler.EXECUTABLE:
- linker = self.linker_exe[:]
- else:
- linker = self.linker_so[:]
- if target_lang == "c++" and self.compiler_cxx:
- # skip over environment variable settings if /usr/bin/env
- # is used to set up the linker's environment.
- # This is needed on OSX. Note: this assumes that the
- # normal and C++ compiler have the same environment
- # settings.
- i = 0
- if os.path.basename(linker[0]) == "env":
- i = 1
- while '=' in linker[i]:
- i = i + 1
-
- linker[i] = self.compiler_cxx[i]
-
- if sys.platform == 'darwin':
- import _osx_support
- linker = _osx_support.compiler_fixup(linker, ld_args)
-
- temporary_directory = tempfile.mkdtemp()
- command_filename = os.path.abspath(
- os.path.join(temporary_directory, 'command'))
- with open(command_filename, 'w') as command_file:
- escaped_ld_args = [arg.replace('\\', '\\\\') for arg in ld_args]
- command_file.write(' '.join(escaped_ld_args))
- self.spawn(linker + ['@{}'.format(command_filename)])
- except errors.DistutilsExecError:
- raise ccompiler.LinkError
+ command_base = os.path.basename(command[0].strip())
+ if command_base == 'ccache':
+ command_base = command[:2]
+ command_args = command[2:]
+ elif command_base.startswith('ccache') or command_base in ['gcc', 'clang', 'clang++', 'g++']:
+ command_base = command[:1]
+ command_args = command[1:]
else:
- log.debug("skipping %s (up-to-date)", output_filename)
+ return ccompiler.CCompiler.spawn(self, command)
+ temporary_directory = tempfile.mkdtemp()
+ command_filename = os.path.abspath(os.path.join(temporary_directory, 'command'))
+ with open(command_filename, 'w') as command_file:
+ escaped_args = [arg.replace('\\', '\\\\') for arg in command_args]
+ command_file.write(' '.join(escaped_args))
+ modified_command = command_base + ['@{}'.format(command_filename)]
+ result = ccompiler.CCompiler.spawn(self, modified_command)
+ shutil.rmtree(temporary_directory)
+ return result
+
-# TODO(atash) try replacing this monkeypatch of the compiler harness' link
-# operation with a monkeypatch of the distutils `spawn` that applies
-# command-argument-file hacks where it can. Might be cleaner.
def monkeypatch_unix_compiler():
"""Monkeypatching is dumb, but it's either that or we become maintainers of
something much, much bigger."""
- unixccompiler.UnixCCompiler.link = _unix_piecemeal_link
+ unixccompiler.UnixCCompiler.spawn = _unix_commandfile_spawn
diff --git a/src/python/grpcio/commands.py b/src/python/grpcio/commands.py
index 3f91954d5f..86a73fa836 100644
--- a/src/python/grpcio/commands.py
+++ b/src/python/grpcio/commands.py
@@ -58,10 +58,31 @@ CONF_PY_ADDENDUM = """
extensions.append('sphinx.ext.napoleon')
napoleon_google_docstring = True
napoleon_numpy_docstring = True
+napoleon_include_special_with_doc = True
html_theme = 'sphinx_rtd_theme'
"""
+API_GLOSSARY = """
+
+Glossary
+================
+
+.. glossary::
+
+ metadatum
+ A key-value pair included in the HTTP header. It is a
+ 2-tuple where the first entry is the key and the
+ second is the value, i.e. (key, value). The metadata key is an ASCII str,
+ and must be a valid HTTP header name. The metadata value can be
+     either a valid HTTP ASCII str or bytes. If bytes are provided,
+     the key must end with '-bin', e.g.
+     ``('binary-metadata-bin', b'\\x00\\xFF')``
+
+ metadata
+ A sequence of metadatum.
+"""
+
class CommandError(Exception):
"""Simple exception class for GRPC custom commands."""
@@ -131,6 +152,9 @@ class SphinxDocumentation(setuptools.Command):
conf_filepath = os.path.join('doc', 'src', 'conf.py')
with open(conf_filepath, 'a') as conf_file:
conf_file.write(CONF_PY_ADDENDUM)
+ glossary_filepath = os.path.join('doc', 'src', 'grpc.rst')
+ with open(glossary_filepath, 'a') as glossary_filepath:
+ glossary_filepath.write(API_GLOSSARY)
sphinx.main(['', os.path.join('doc', 'src'), os.path.join('doc', 'build')])
diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py
index afd7db1f75..fd015129f0 100644
--- a/src/python/grpcio/grpc/__init__.py
+++ b/src/python/grpcio/grpc/__init__.py
@@ -312,7 +312,7 @@ class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
This method blocks until the value is available.
Returns:
- The initial metadata as a sequence of pairs of bytes.
+ The initial :term:`metadata`.
"""
raise NotImplementedError()
@@ -323,7 +323,7 @@ class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
This method blocks until the value is available.
Returns:
- The trailing metadata as a sequence of pairs of bytes.
+ The trailing :term:`metadata`.
"""
raise NotImplementedError()
@@ -394,8 +394,7 @@ class AuthMetadataPluginCallback(six.with_metaclass(abc.ABCMeta)):
"""Inform the gRPC runtime of the metadata to construct a CallCredentials.
Args:
- metadata: An iterable of 2-sequences (e.g. tuples) of metadata key/value
- pairs.
+ metadata: The :term:`metadata` used to construct the CallCredentials.
error: An Exception to indicate error or None to indicate success.
"""
raise NotImplementedError()
@@ -442,7 +441,7 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
- metadata: An optional sequence of pairs of bytes to be transmitted to the
+ metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
@@ -463,7 +462,7 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
Args:
request: The request value for the RPC.
      timeout: An optional duration of time in seconds to allow for the RPC.
- metadata: An optional sequence of pairs of bytes to be transmitted to the
+ metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
@@ -484,7 +483,7 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
- metadata: An optional sequence of pairs of bytes to be transmitted to the
+ metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
@@ -507,7 +506,7 @@ class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
- metadata: An optional sequence of pairs of bytes to be transmitted to the
+      metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
@@ -530,7 +529,7 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
- metadata: An optional sequence of pairs of bytes to be transmitted to the
+ metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
@@ -553,7 +552,7 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
- metadata: An optional sequence of pairs of bytes to be transmitted to the
+ metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
@@ -575,7 +574,7 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
- metadata: An optional sequence of pairs of bytes to be transmitted to the
+ metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
@@ -599,7 +598,7 @@ class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
- metadata: An optional sequence of pairs of bytes to be transmitted to the
+ metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
@@ -707,7 +706,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
"""Accesses the metadata from the invocation-side of the RPC.
Returns:
- The invocation metadata object as a sequence of pairs of bytes.
+ The invocation :term:`metadata`.
"""
raise NotImplementedError()
@@ -728,8 +727,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
service-side initial metadata to transmit.
Args:
- initial_metadata: The initial metadata of the RPC as a sequence of pairs
- of bytes.
+ initial_metadata: The initial :term:`metadata`.
"""
raise NotImplementedError()
@@ -741,8 +739,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
service-side trailing metadata to transmit.
Args:
- trailing_metadata: The trailing metadata of the RPC as a sequence of pairs
- of bytes.
+ trailing_metadata: The trailing :term:`metadata`.
"""
raise NotImplementedError()
@@ -815,7 +812,7 @@ class HandlerCallDetails(six.with_metaclass(abc.ABCMeta)):
"""Describes an RPC that has just arrived for service.
Attributes:
method: The method name of the RPC.
- invocation_metadata: The metadata from the invocation side of the RPC.
+ invocation_metadata: The :term:`metadata` from the invocation side of the RPC.
"""
@@ -1219,9 +1216,9 @@ def server(thread_pool, handlers=None):
to service RPCs.
handlers: An optional sequence of GenericRpcHandlers to be used to service
RPCs after the returned Server is started. These handlers need not be the
- only handlers the returned Server will use to service RPCs; other
- handlers may later be added to the returned Server by calling its
- add_generic_rpc_handlers method any time before it is started.
+ only handlers the server will use to service RPCs; other handlers may
+ later be added by calling add_generic_rpc_handlers any time before the
+ returned Server is started.
Returns:
A Server with which RPCs can be serviced.
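For orientation, a minimal sketch of the server() usage described in the docstring above, adding a handler after construction but before start(); the thread-pool size and the handler class are illustrative only:

    from concurrent import futures

    import grpc

    class _DeclineAllHandler(grpc.GenericRpcHandler):
        """Illustrative handler; a real one returns an RpcMethodHandler."""

        def service(self, handler_call_details):
            return None  # decline to service the RPC

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    # Handlers may be given at construction time or, as here, added any time
    # before the server is started.
    server.add_generic_rpc_handlers((_DeclineAllHandler(),))
    server.start()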
diff --git a/src/python/grpcio/grpc/_adapter/.gitignore b/src/python/grpcio/grpc/_adapter/.gitignore
deleted file mode 100644
index a6f96cd6db..0000000000
--- a/src/python/grpcio/grpc/_adapter/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*.a
-*.so
-*.dll
-*.pyc
-*.pyd
diff --git a/src/python/grpcio/grpc/_adapter/__init__.py b/src/python/grpcio/grpc/_adapter/__init__.py
deleted file mode 100644
index 7086519106..0000000000
--- a/src/python/grpcio/grpc/_adapter/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio/grpc/_adapter/_common.py b/src/python/grpcio/grpc/_adapter/_common.py
deleted file mode 100644
index 492849f4cb..0000000000
--- a/src/python/grpcio/grpc/_adapter/_common.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State used by both invocation-side and service-side code."""
-
-import enum
-
-
-@enum.unique
-class HighWrite(enum.Enum):
- """The possible categories of high-level write state."""
-
- OPEN = 'OPEN'
- CLOSED = 'CLOSED'
-
-
-class WriteState(object):
- """A description of the state of writing to an RPC.
-
- Attributes:
- low: A side-specific value describing the low-level state of writing.
- high: A HighWrite value describing the high-level state of writing.
- pending: A list of bytestrings for the RPC waiting to be written to the
- other side of the RPC.
- """
-
- def __init__(self, low, high, pending):
- self.low = low
- self.high = high
- self.pending = pending
-
-
-class CommonRPCState(object):
- """A description of an RPC's state.
-
- Attributes:
- write: A WriteState describing the state of writing to the RPC.
- sequence_number: The lowest-unused sequence number for use in generating
- tickets locally describing the progress of the RPC.
- deserializer: The behavior to be used to deserialize payload bytestreams
- taken off the wire.
- serializer: The behavior to be used to serialize payloads to be sent on the
- wire.
- """
-
- def __init__(self, write, sequence_number, deserializer, serializer):
- self.write = write
- self.sequence_number = sequence_number
- self.deserializer = deserializer
- self.serializer = serializer
diff --git a/src/python/grpcio/grpc/_adapter/_intermediary_low.py b/src/python/grpcio/grpc/_adapter/_intermediary_low.py
deleted file mode 100644
index 9698ffeabf..0000000000
--- a/src/python/grpcio/grpc/_adapter/_intermediary_low.py
+++ /dev/null
@@ -1,258 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Temporary old _low-like layer.
-
-Eases refactoring burden while we overhaul the Python framework.
-
-Plan:
- The layers used to look like:
- ... # outside _adapter
- fore.py + rear.py # visible outside _adapter
- _low
- _c
- The layers currently look like:
- ... # outside _adapter
- fore.py + rear.py # visible outside _adapter
- _low_intermediary # adapter for new '_low' to old '_low'
- _low # new '_low'
- _c # new '_c'
- We will later remove _low_intermediary after refactoring of fore.py and
- rear.py according to the ticket system refactoring and get:
- ... # outside _adapter, refactored
- fore.py + rear.py # visible outside _adapter, refactored
- _low # new '_low'
- _c # new '_c'
-"""
-
-import collections
-import enum
-
-from grpc._adapter import _low
-from grpc._adapter import _types
-
-_IGNORE_ME_TAG = object()
-Code = _types.StatusCode
-WriteFlags = _types.OpWriteFlags
-
-
-class Status(collections.namedtuple('Status', ['code', 'details'])):
- """Describes an RPC's overall status."""
-
-
-class ServiceAcceptance(
- collections.namedtuple(
- 'ServiceAcceptance', ['call', 'method', 'host', 'deadline'])):
- """Describes an RPC on the service side at the start of service."""
-
-
-class Event(
- collections.namedtuple(
- 'Event',
- ['kind', 'tag', 'write_accepted', 'complete_accepted',
- 'service_acceptance', 'bytes', 'status', 'metadata'])):
- """Describes an event emitted from a completion queue."""
-
- @enum.unique
- class Kind(enum.Enum):
- """Describes the kind of an event."""
-
- STOP = object()
- WRITE_ACCEPTED = object()
- COMPLETE_ACCEPTED = object()
- SERVICE_ACCEPTED = object()
- READ_ACCEPTED = object()
- METADATA_ACCEPTED = object()
- FINISH = object()
-
-
-class _TagAdapter(collections.namedtuple('_TagAdapter', [
- 'user_tag',
- 'kind'
- ])):
- pass
-
-
-class Call(object):
- """Adapter from old _low.Call interface to new _low.Call."""
-
- def __init__(self, channel, completion_queue, method, host, deadline):
- self._internal = channel._internal.create_call(
- completion_queue._internal, method, host, deadline)
- self._metadata = []
-
- @staticmethod
- def _from_internal(internal):
- call = Call.__new__(Call)
- call._internal = internal
- call._metadata = []
- return call
-
- def invoke(self, completion_queue, metadata_tag, finish_tag):
- err = self._internal.start_batch([
- _types.OpArgs.send_initial_metadata(self._metadata)
- ], _IGNORE_ME_TAG)
- if err != _types.CallError.OK:
- return err
- err = self._internal.start_batch([
- _types.OpArgs.recv_initial_metadata()
- ], _TagAdapter(metadata_tag, Event.Kind.METADATA_ACCEPTED))
- if err != _types.CallError.OK:
- return err
- err = self._internal.start_batch([
- _types.OpArgs.recv_status_on_client()
- ], _TagAdapter(finish_tag, Event.Kind.FINISH))
- return err
-
- def write(self, message, tag, flags):
- return self._internal.start_batch([
- _types.OpArgs.send_message(message, flags)
- ], _TagAdapter(tag, Event.Kind.WRITE_ACCEPTED))
-
- def complete(self, tag):
- return self._internal.start_batch([
- _types.OpArgs.send_close_from_client()
- ], _TagAdapter(tag, Event.Kind.COMPLETE_ACCEPTED))
-
- def accept(self, completion_queue, tag):
- return self._internal.start_batch([
- _types.OpArgs.recv_close_on_server()
- ], _TagAdapter(tag, Event.Kind.FINISH))
-
- def add_metadata(self, key, value):
- self._metadata.append((key, value))
-
- def premetadata(self):
- result = self._internal.start_batch([
- _types.OpArgs.send_initial_metadata(self._metadata)
- ], _IGNORE_ME_TAG)
- self._metadata = []
- return result
-
- def read(self, tag):
- return self._internal.start_batch([
- _types.OpArgs.recv_message()
- ], _TagAdapter(tag, Event.Kind.READ_ACCEPTED))
-
- def status(self, status, tag):
- return self._internal.start_batch([
- _types.OpArgs.send_status_from_server(
- self._metadata, status.code, status.details)
- ], _TagAdapter(tag, Event.Kind.COMPLETE_ACCEPTED))
-
- def cancel(self):
- return self._internal.cancel()
-
- def peer(self):
- return self._internal.peer()
-
- def set_credentials(self, creds):
- return self._internal.set_credentials(creds)
-
-
-class Channel(object):
- """Adapter from old _low.Channel interface to new _low.Channel."""
-
- def __init__(self, hostport, channel_credentials, server_host_override=None):
- args = []
- if server_host_override:
- args.append((_types.GrpcChannelArgumentKeys.SSL_TARGET_NAME_OVERRIDE.value, server_host_override))
- self._internal = _low.Channel(hostport, args, channel_credentials)
-
-
-class CompletionQueue(object):
- """Adapter from old _low.CompletionQueue interface to new _low.CompletionQueue."""
-
- def __init__(self):
- self._internal = _low.CompletionQueue()
-
- def get(self, deadline=None):
- if deadline is None:
- ev = self._internal.next(float('+inf'))
- else:
- ev = self._internal.next(deadline)
- if ev is None:
- return None
- elif ev.tag is _IGNORE_ME_TAG:
- return self.get(deadline)
- elif ev.type == _types.EventType.QUEUE_SHUTDOWN:
- kind = Event.Kind.STOP
- tag = None
- write_accepted = None
- complete_accepted = None
- service_acceptance = None
- message_bytes = None
- status = None
- metadata = None
- elif ev.type == _types.EventType.OP_COMPLETE:
- kind = ev.tag.kind
- tag = ev.tag.user_tag
- write_accepted = ev.success if kind == Event.Kind.WRITE_ACCEPTED else None
- complete_accepted = ev.success if kind == Event.Kind.COMPLETE_ACCEPTED else None
- service_acceptance = ServiceAcceptance(Call._from_internal(ev.call), ev.call_details.method, ev.call_details.host, ev.call_details.deadline) if kind == Event.Kind.SERVICE_ACCEPTED else None
- message_bytes = ev.results[0].message if kind == Event.Kind.READ_ACCEPTED else None
- status = Status(ev.results[0].status.code, ev.results[0].status.details) if (kind == Event.Kind.FINISH and ev.results[0].status) else Status(_types.StatusCode.CANCELLED if ev.results[0].cancelled else _types.StatusCode.OK, '') if len(ev.results) > 0 and ev.results[0].cancelled is not None else None
- metadata = ev.results[0].initial_metadata if (kind in [Event.Kind.SERVICE_ACCEPTED, Event.Kind.METADATA_ACCEPTED]) else (ev.results[0].trailing_metadata if kind == Event.Kind.FINISH else None)
- else:
- raise RuntimeError('unknown event')
- result_ev = Event(kind=kind, tag=tag, write_accepted=write_accepted, complete_accepted=complete_accepted, service_acceptance=service_acceptance, bytes=message_bytes, status=status, metadata=metadata)
- return result_ev
-
- def stop(self):
- self._internal.shutdown()
-
-
-class Server(object):
- """Adapter from old _low.Server interface to new _low.Server."""
-
- def __init__(self, completion_queue):
- self._internal = _low.Server(completion_queue._internal, [])
- self._internal_cq = completion_queue._internal
-
- def add_http2_addr(self, addr):
- return self._internal.add_http2_port(addr)
-
- def add_secure_http2_addr(self, addr, server_credentials):
- if server_credentials is None:
- return self._internal.add_http2_port(addr, None)
- else:
- return self._internal.add_http2_port(addr, server_credentials)
-
- def start(self):
- return self._internal.start()
-
- def service(self, tag):
- return self._internal.request_call(self._internal_cq, _TagAdapter(tag, Event.Kind.SERVICE_ACCEPTED))
-
- def cancel_all_calls(self):
- self._internal.cancel_all_calls()
-
- def stop(self):
- return self._internal.shutdown(_TagAdapter(None, Event.Kind.STOP))
-
diff --git a/src/python/grpcio/grpc/_adapter/_low.py b/src/python/grpcio/grpc/_adapter/_low.py
deleted file mode 100644
index 48410167a0..0000000000
--- a/src/python/grpcio/grpc/_adapter/_low.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import threading
-
-from grpc import _grpcio_metadata
-from grpc import _plugin_wrapping
-from grpc._cython import cygrpc
-from grpc._adapter import _types
-
-_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__)
-
-ChannelCredentials = cygrpc.ChannelCredentials
-CallCredentials = cygrpc.CallCredentials
-ServerCredentials = cygrpc.ServerCredentials
-
-channel_credentials_composite = cygrpc.channel_credentials_composite
-call_credentials_composite = cygrpc.call_credentials_composite
-
-def server_credentials_ssl(root_credentials, pair_sequence, force_client_auth):
- return cygrpc.server_credentials_ssl(
- root_credentials,
- [cygrpc.SslPemKeyCertPair(key, pem) for key, pem in pair_sequence],
- force_client_auth)
-
-def channel_credentials_ssl(
- root_certificates, private_key, certificate_chain):
- pair = None
- if private_key is not None or certificate_chain is not None:
- pair = cygrpc.SslPemKeyCertPair(private_key, certificate_chain)
- return cygrpc.channel_credentials_ssl(root_certificates, pair)
-
-
-call_credentials_metadata_plugin = (
- _plugin_wrapping.call_credentials_metadata_plugin)
-
-
-class CompletionQueue(_types.CompletionQueue):
-
- def __init__(self):
- self.completion_queue = cygrpc.CompletionQueue()
-
- def next(self, deadline=float('+inf')):
- raw_event = self.completion_queue.poll(cygrpc.Timespec(deadline))
- if raw_event.type == cygrpc.CompletionType.queue_timeout:
- return None
- event_type = raw_event.type
- event_tag = raw_event.tag
- event_call = Call(raw_event.operation_call)
- if raw_event.request_call_details:
- event_call_details = _types.CallDetails(
- raw_event.request_call_details.method,
- raw_event.request_call_details.host,
- float(raw_event.request_call_details.deadline))
- else:
- event_call_details = None
- event_success = raw_event.success
- event_results = []
- if raw_event.is_new_request:
- event_results.append(_types.OpResult(
- _types.OpType.RECV_INITIAL_METADATA, raw_event.request_metadata,
- None, None, None, None))
- else:
- if raw_event.batch_operations:
- for operation in raw_event.batch_operations:
- result_type = operation.type
- result_initial_metadata = operation.received_metadata_or_none
- result_trailing_metadata = operation.received_metadata_or_none
- result_message = operation.received_message_or_none
- if result_message is not None:
- result_message = result_message.bytes()
- result_cancelled = operation.received_cancelled_or_none
- if operation.has_status:
- result_status = _types.Status(
- operation.received_status_code_or_none,
- operation.received_status_details_or_none)
- else:
- result_status = None
- event_results.append(
- _types.OpResult(result_type, result_initial_metadata,
- result_trailing_metadata, result_message,
- result_status, result_cancelled))
- return _types.Event(event_type, event_tag, event_call, event_call_details,
- event_results, event_success)
-
- def shutdown(self):
- self.completion_queue.shutdown()
-
-
-class Call(_types.Call):
-
- def __init__(self, call):
- self.call = call
-
- def start_batch(self, ops, tag):
- translated_ops = []
- for op in ops:
- if op.type == _types.OpType.SEND_INITIAL_METADATA:
- translated_op = cygrpc.operation_send_initial_metadata(
- cygrpc.Metadata(
- cygrpc.Metadatum(key, value)
- for key, value in op.initial_metadata),
- op.flags)
- elif op.type == _types.OpType.SEND_MESSAGE:
- translated_op = cygrpc.operation_send_message(op.message, op.flags)
- elif op.type == _types.OpType.SEND_CLOSE_FROM_CLIENT:
- translated_op = cygrpc.operation_send_close_from_client(op.flags)
- elif op.type == _types.OpType.SEND_STATUS_FROM_SERVER:
- translated_op = cygrpc.operation_send_status_from_server(
- cygrpc.Metadata(
- cygrpc.Metadatum(key, value)
- for key, value in op.trailing_metadata),
- op.status.code,
- op.status.details,
- op.flags)
- elif op.type == _types.OpType.RECV_INITIAL_METADATA:
- translated_op = cygrpc.operation_receive_initial_metadata(
- op.flags)
- elif op.type == _types.OpType.RECV_MESSAGE:
- translated_op = cygrpc.operation_receive_message(op.flags)
- elif op.type == _types.OpType.RECV_STATUS_ON_CLIENT:
- translated_op = cygrpc.operation_receive_status_on_client(
- op.flags)
- elif op.type == _types.OpType.RECV_CLOSE_ON_SERVER:
- translated_op = cygrpc.operation_receive_close_on_server(op.flags)
- else:
- raise ValueError('unexpected operation type {}'.format(op.type))
- translated_ops.append(translated_op)
- return self.call.start_batch(cygrpc.Operations(translated_ops), tag)
-
- def cancel(self, code=None, details=None):
- if code is None and details is None:
- return self.call.cancel()
- else:
- return self.call.cancel(code, details)
-
- def peer(self):
- return self.call.peer()
-
- def set_credentials(self, creds):
- return self.call.set_credentials(creds)
-
-
-class Channel(_types.Channel):
-
- def __init__(self, target, args, creds=None):
- args = list(args) + [
- (cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT)]
- args = cygrpc.ChannelArgs(
- cygrpc.ChannelArg(key, value) for key, value in args)
- if creds is None:
- self.channel = cygrpc.Channel(target, args)
- else:
- self.channel = cygrpc.Channel(target, args, creds)
-
- def create_call(self, completion_queue, method, host, deadline=None):
- internal_call = self.channel.create_call(
- None, 0, completion_queue.completion_queue, method, host,
- cygrpc.Timespec(deadline))
- return Call(internal_call)
-
- def check_connectivity_state(self, try_to_connect):
- return self.channel.check_connectivity_state(try_to_connect)
-
- def watch_connectivity_state(self, last_observed_state, deadline,
- completion_queue, tag):
- self.channel.watch_connectivity_state(
- last_observed_state, cygrpc.Timespec(deadline),
- completion_queue.completion_queue, tag)
-
- def target(self):
- return self.channel.target()
-
-
-_NO_TAG = object()
-
-class Server(_types.Server):
-
- def __init__(self, completion_queue, args):
- args = cygrpc.ChannelArgs(
- cygrpc.ChannelArg(key, value) for key, value in args)
- self.server = cygrpc.Server(args)
- self.server.register_completion_queue(completion_queue.completion_queue)
- self.server_queue = completion_queue
-
- def add_http2_port(self, addr, creds=None):
- if creds is None:
- return self.server.add_http2_port(addr)
- else:
- return self.server.add_http2_port(addr, creds)
-
- def start(self):
- return self.server.start()
-
- def shutdown(self, tag=None):
- return self.server.shutdown(self.server_queue.completion_queue, tag)
-
- def request_call(self, completion_queue, tag):
- return self.server.request_call(completion_queue.completion_queue,
- self.server_queue.completion_queue, tag)
-
- def cancel_all_calls(self):
- return self.server.cancel_all_calls()
diff --git a/src/python/grpcio/grpc/_adapter/_types.py b/src/python/grpcio/grpc/_adapter/_types.py
deleted file mode 100644
index b7cc6fbbb5..0000000000
--- a/src/python/grpcio/grpc/_adapter/_types.py
+++ /dev/null
@@ -1,446 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import abc
-import collections
-import enum
-
-import six
-
-from grpc._cython import cygrpc
-
-
-class GrpcChannelArgumentKeys(enum.Enum):
- """Mirrors keys used in grpc_channel_args for GRPC-specific arguments."""
- SSL_TARGET_NAME_OVERRIDE = 'grpc.ssl_target_name_override'
-
-
-@enum.unique
-class CallError(enum.IntEnum):
- """Mirrors grpc_call_error in the C core."""
- OK = cygrpc.CallError.ok
- ERROR = cygrpc.CallError.error
- ERROR_NOT_ON_SERVER = cygrpc.CallError.not_on_server
- ERROR_NOT_ON_CLIENT = cygrpc.CallError.not_on_client
- ERROR_ALREADY_ACCEPTED = cygrpc.CallError.already_accepted
- ERROR_ALREADY_INVOKED = cygrpc.CallError.already_invoked
- ERROR_NOT_INVOKED = cygrpc.CallError.not_invoked
- ERROR_ALREADY_FINISHED = cygrpc.CallError.already_finished
- ERROR_TOO_MANY_OPERATIONS = cygrpc.CallError.too_many_operations
- ERROR_INVALID_FLAGS = cygrpc.CallError.invalid_flags
- ERROR_INVALID_METADATA = cygrpc.CallError.invalid_metadata
-
-
-@enum.unique
-class StatusCode(enum.IntEnum):
- """Mirrors grpc_status_code in the C core."""
- OK = cygrpc.StatusCode.ok
- CANCELLED = cygrpc.StatusCode.cancelled
- UNKNOWN = cygrpc.StatusCode.unknown
- INVALID_ARGUMENT = cygrpc.StatusCode.invalid_argument
- DEADLINE_EXCEEDED = cygrpc.StatusCode.deadline_exceeded
- NOT_FOUND = cygrpc.StatusCode.not_found
- ALREADY_EXISTS = cygrpc.StatusCode.already_exists
- PERMISSION_DENIED = cygrpc.StatusCode.permission_denied
- RESOURCE_EXHAUSTED = cygrpc.StatusCode.resource_exhausted
- FAILED_PRECONDITION = cygrpc.StatusCode.failed_precondition
- ABORTED = cygrpc.StatusCode.aborted
- OUT_OF_RANGE = cygrpc.StatusCode.out_of_range
- UNIMPLEMENTED = cygrpc.StatusCode.unimplemented
- INTERNAL = cygrpc.StatusCode.internal
- UNAVAILABLE = cygrpc.StatusCode.unavailable
- DATA_LOSS = cygrpc.StatusCode.data_loss
- UNAUTHENTICATED = cygrpc.StatusCode.unauthenticated
-
-
-@enum.unique
-class OpWriteFlags(enum.IntEnum):
- """Mirrors defined write-flag constants in the C core."""
- WRITE_BUFFER_HINT = cygrpc.WriteFlag.buffer_hint
- WRITE_NO_COMPRESS = cygrpc.WriteFlag.no_compress
-
-
-@enum.unique
-class OpType(enum.IntEnum):
- """Mirrors grpc_op_type in the C core."""
- SEND_INITIAL_METADATA = cygrpc.OperationType.send_initial_metadata
- SEND_MESSAGE = cygrpc.OperationType.send_message
- SEND_CLOSE_FROM_CLIENT = cygrpc.OperationType.send_close_from_client
- SEND_STATUS_FROM_SERVER = cygrpc.OperationType.send_status_from_server
- RECV_INITIAL_METADATA = cygrpc.OperationType.receive_initial_metadata
- RECV_MESSAGE = cygrpc.OperationType.receive_message
- RECV_STATUS_ON_CLIENT = cygrpc.OperationType.receive_status_on_client
- RECV_CLOSE_ON_SERVER = cygrpc.OperationType.receive_close_on_server
-
-
-@enum.unique
-class EventType(enum.IntEnum):
- """Mirrors grpc_completion_type in the C core."""
- QUEUE_SHUTDOWN = cygrpc.CompletionType.queue_shutdown
- QUEUE_TIMEOUT = cygrpc.CompletionType.queue_timeout
- OP_COMPLETE = cygrpc.CompletionType.operation_complete
-
-
-@enum.unique
-class ConnectivityState(enum.IntEnum):
- """Mirrors grpc_connectivity_state in the C core."""
- IDLE = cygrpc.ConnectivityState.idle
- CONNECTING = cygrpc.ConnectivityState.connecting
- READY = cygrpc.ConnectivityState.ready
- TRANSIENT_FAILURE = cygrpc.ConnectivityState.transient_failure
- FATAL_FAILURE = cygrpc.ConnectivityState.shutdown
-
-
-class Status(collections.namedtuple(
- 'Status', [
- 'code',
- 'details',
- ])):
- """The end status of a GRPC call.
-
- Attributes:
- code (StatusCode): ...
- details (str): ...
- """
-
-
-class CallDetails(collections.namedtuple(
- 'CallDetails', [
- 'method',
- 'host',
- 'deadline',
- ])):
- """Provides information to the server about the client's call.
-
- Attributes:
- method (str): ...
- host (str): ...
- deadline (float): ...
- """
-
-
-class OpArgs(collections.namedtuple(
- 'OpArgs', [
- 'type',
- 'initial_metadata',
- 'trailing_metadata',
- 'message',
- 'status',
- 'flags',
- ])):
- """Arguments passed into a GRPC operation.
-
- Attributes:
- type (OpType): ...
- initial_metadata (sequence of 2-sequence of str): Only valid if type ==
- OpType.SEND_INITIAL_METADATA, else is None.
- trailing_metadata (sequence of 2-sequence of str): Only valid if type ==
- OpType.SEND_STATUS_FROM_SERVER, else is None.
- message (bytes): Only valid if type == OpType.SEND_MESSAGE, else is None.
- status (Status): Only valid if type == OpType.SEND_STATUS_FROM_SERVER, else
- is None.
- flags (int): a bitwise OR'ing of 0 or more OpWriteFlags values.
- """
-
- @staticmethod
- def send_initial_metadata(initial_metadata):
- return OpArgs(OpType.SEND_INITIAL_METADATA, initial_metadata, None, None, None, 0)
-
- @staticmethod
- def send_message(message, flags):
- return OpArgs(OpType.SEND_MESSAGE, None, None, message, None, flags)
-
- @staticmethod
- def send_close_from_client():
- return OpArgs(OpType.SEND_CLOSE_FROM_CLIENT, None, None, None, None, 0)
-
- @staticmethod
- def send_status_from_server(trailing_metadata, status_code, status_details):
- return OpArgs(OpType.SEND_STATUS_FROM_SERVER, None, trailing_metadata, None, Status(status_code, status_details), 0)
-
- @staticmethod
- def recv_initial_metadata():
- return OpArgs(OpType.RECV_INITIAL_METADATA, None, None, None, None, 0);
-
- @staticmethod
- def recv_message():
- return OpArgs(OpType.RECV_MESSAGE, None, None, None, None, 0)
-
- @staticmethod
- def recv_status_on_client():
- return OpArgs(OpType.RECV_STATUS_ON_CLIENT, None, None, None, None, 0)
-
- @staticmethod
- def recv_close_on_server():
- return OpArgs(OpType.RECV_CLOSE_ON_SERVER, None, None, None, None, 0)
-
-
-class OpResult(collections.namedtuple(
- 'OpResult', [
- 'type',
- 'initial_metadata',
- 'trailing_metadata',
- 'message',
- 'status',
- 'cancelled',
- ])):
- """Results received from a GRPC operation.
-
- Attributes:
- type (OpType): ...
- initial_metadata (sequence of 2-sequence of str): Only valid if type ==
- OpType.RECV_INITIAL_METADATA, else is None.
- trailing_metadata (sequence of 2-sequence of str): Only valid if type ==
- OpType.RECV_STATUS_ON_CLIENT, else is None.
- message (bytes): Only valid if type == OpType.RECV_MESSAGE, else is None.
- status (Status): Only valid if type == OpType.RECV_STATUS_ON_CLIENT, else
- is None.
- cancelled (bool): Only valid if type == OpType.RECV_CLOSE_ON_SERVER, else
- is None.
- """
-
-
-class Event(collections.namedtuple(
- 'Event', [
- 'type',
- 'tag',
- 'call',
- 'call_details',
- 'results',
- 'success',
- ])):
- """An event received from a GRPC completion queue.
-
- Attributes:
- type (EventType): ...
- tag (object): ...
- call (Call): The Call object associated with this event (if there is one,
- else None).
- call_details (CallDetails): The call details associated with the
- server-side call (if there is such information, else None).
- results (list of OpResult): ...
- success (bool): ...
- """
-
-
-class CompletionQueue(six.with_metaclass(abc.ABCMeta)):
-
- @abc.abstractmethod
- def __init__(self):
- pass
-
- def __iter__(self):
- """This class may be iterated over.
-
- This is the equivalent of calling next() repeatedly with an absolute
- deadline of None (i.e. no deadline).
- """
- return self
-
- def __next__(self):
- return self.next()
-
- @abc.abstractmethod
- def next(self, deadline=float('+inf')):
- """Get the next event on this completion queue.
-
- Args:
- deadline (float): absolute deadline in seconds from the Python epoch, or
- None for no deadline.
-
- Returns:
- Event: ...
- """
- pass
-
- @abc.abstractmethod
- def shutdown(self):
- """Begin the shutdown process of this completion queue.
-
- Note that this does not immediately destroy the completion queue.
- Nevertheless, user code should not pass it around after invoking this.
- """
- return None
-
-
-class Call(six.with_metaclass(abc.ABCMeta)):
-
- @abc.abstractmethod
- def start_batch(self, ops, tag):
- """Start a batch of operations.
-
- Args:
- ops (sequence of OpArgs): ...
- tag (object): ...
-
- Returns:
- CallError: ...
- """
- return CallError.ERROR
-
- @abc.abstractmethod
- def cancel(self, code=None, details=None):
- """Cancel the call.
-
- Args:
- code (int): Status code to cancel with (on the server side). If
- specified, so must `details`.
- details (str): Status details to cancel with (on the server side). If
- specified, so must `code`.
-
- Returns:
- CallError: ...
- """
- return CallError.ERROR
-
- @abc.abstractmethod
- def peer(self):
- """Get the peer of this call.
-
- Returns:
- str: the peer of this call.
- """
- return None
-
- def set_credentials(self, creds):
- """Set per-call credentials.
-
- Args:
- creds (CallCredentials): Credentials to be set for this call.
- """
- return None
-
-
-class Channel(six.with_metaclass(abc.ABCMeta)):
-
- @abc.abstractmethod
- def __init__(self, target, args, credentials=None):
- """Initialize a Channel.
-
- Args:
- target (str): ...
- args (sequence of 2-sequence of str, (str|integer)): ...
- credentials (ChannelCredentials): If None, create an insecure channel,
- else create a secure channel using the client credentials.
- """
-
- @abc.abstractmethod
- def create_call(self, completion_queue, method, host, deadline=float('+inf')):
- """Create a call from this channel.
-
- Args:
- completion_queue (CompletionQueue): ...
- method (str): ...
- host (str): ...
- deadline (float): absolute deadline in seconds from the Python epoch, or
- None for no deadline.
-
- Returns:
- Call: call object associated with this Channel and passed parameters.
- """
- return None
-
- @abc.abstractmethod
- def check_connectivity_state(self, try_to_connect):
- """Check and optionally repair the connectivity state of the channel.
-
- Args:
- try_to_connect (bool): whether or not to try to connect the channel if
- disconnected.
-
- Returns:
- ConnectivityState: state of the channel at the time of this invocation.
- """
- return None
-
- @abc.abstractmethod
- def watch_connectivity_state(self, last_observed_state, deadline,
- completion_queue, tag):
- """Watch for connectivity state changes from the last_observed_state.
-
- Args:
- last_observed_state (ConnectivityState): ...
- deadline (float): ...
- completion_queue (CompletionQueue): ...
- tag (object) ...
- """
-
- @abc.abstractmethod
- def target(self):
- """Get the target of this channel.
-
- Returns:
- str: the target of this channel.
- """
- return None
-
-
-class Server(six.with_metaclass(abc.ABCMeta)):
-
- @abc.abstractmethod
- def __init__(self, completion_queue, args):
- """Initialize a server.
-
- Args:
- completion_queue (CompletionQueue): ...
- args (sequence of 2-sequence of str, (str|integer)): ...
- """
-
- @abc.abstractmethod
- def add_http2_port(self, address, credentials=None):
- """Adds an HTTP/2 address+port to the server.
-
- Args:
- address (str): ...
- credentials (ServerCredentials): If None, create an insecure port, else
- create a secure port using the server credentials.
- """
-
- @abc.abstractmethod
- def start(self):
- """Starts the server."""
-
- @abc.abstractmethod
- def shutdown(self, tag=None):
- """Shuts down the server. Does not immediately destroy the server.
-
- Args:
- tag (object): if not None, have the server place an event on its
- completion queue notifying it when this server has completely shut down.
- """
-
- @abc.abstractmethod
- def request_call(self, completion_queue, tag):
- """Requests a call from the server on the server's completion queue.
-
- Args:
- completion_queue (CompletionQueue): Completion queue for the call. May be
- the same as the server's completion queue.
- tag (object) ...
- """
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
index 6570dcdb85..ba60986143 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
@@ -105,8 +105,7 @@ cdef class Call:
def __dealloc__(self):
if self.c_call != NULL:
- with nogil:
- grpc_call_destroy(self.c_call)
+ grpc_call_destroy(self.c_call)
# The object *should* always be valid from Python. Used for debugging.
@property
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
index 1406696510..5416401431 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
@@ -102,5 +102,4 @@ cdef class Channel:
def __dealloc__(self):
if self.c_channel != NULL:
- with nogil:
- grpc_channel_destroy(self.c_channel)
+ grpc_channel_destroy(self.c_channel)
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
index 90266516fe..5955021ceb 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
@@ -118,18 +118,14 @@ cdef class CompletionQueue:
def __dealloc__(self):
cdef gpr_timespec c_deadline
- with nogil:
- c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME)
+ c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME)
if self.c_completion_queue != NULL:
# Ensure shutdown
if not self.is_shutting_down:
- with nogil:
- grpc_completion_queue_shutdown(self.c_completion_queue)
- # Pump the queue
+ grpc_completion_queue_shutdown(self.c_completion_queue)
+ # Pump the queue (All outstanding calls should have been cancelled)
while not self.is_shutdown:
- with nogil:
- event = grpc_completion_queue_next(
- self.c_completion_queue, c_deadline, NULL)
+ event = grpc_completion_queue_next(
+ self.c_completion_queue, c_deadline, NULL)
self._interpret_event(event)
- with nogil:
- grpc_completion_queue_destroy(self.c_completion_queue)
+ grpc_completion_queue_destroy(self.c_completion_queue)
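The __dealloc__ above keeps the existing shutdown-then-drain sequence while dropping the nogil blocks. A purely illustrative Python sketch of that sequence (every name here is a hypothetical stand-in; the real work happens in the C core):

    class _FakeCompletionQueue(object):
        """Stand-in so the sketch runs; mirrors only the control flow."""

        def __init__(self, pending_events=3):
            self.is_shutdown = False
            self._pending = pending_events

        def shutdown(self):
            pass  # stands in for grpc_completion_queue_shutdown

        def pump_one_event(self):
            # Stands in for grpc_completion_queue_next + _interpret_event.
            self._pending -= 1
            if self._pending == 0:
                self.is_shutdown = True

    def drain_and_destroy(queue):
        queue.shutdown()
        while not queue.is_shutdown:
            queue.pump_one_event()
        # Only now is it safe to release the underlying C object.

    drain_and_destroy(_FakeCompletionQueue())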
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
index b24e69243e..035ac49a8b 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
@@ -46,8 +46,7 @@ cdef class ChannelCredentials:
def __dealloc__(self):
if self.c_credentials != NULL:
- with nogil:
- grpc_channel_credentials_release(self.c_credentials)
+ grpc_channel_credentials_release(self.c_credentials)
cdef class CallCredentials:
@@ -64,8 +63,7 @@ cdef class CallCredentials:
def __dealloc__(self):
if self.c_credentials != NULL:
- with nogil:
- grpc_call_credentials_release(self.c_credentials)
+ grpc_call_credentials_release(self.c_credentials)
cdef class ServerCredentials:
@@ -76,8 +74,7 @@ cdef class ServerCredentials:
def __dealloc__(self):
if self.c_credentials != NULL:
- with nogil:
- grpc_server_credentials_release(self.c_credentials)
+ grpc_server_credentials_release(self.c_credentials)
cdef class CredentialsMetadataPlugin:
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
index b39b2f08de..54b3d00dfc 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
@@ -287,8 +287,7 @@ cdef class ByteBuffer:
def __dealloc__(self):
if self.c_byte_buffer != NULL:
- with nogil:
- grpc_byte_buffer_destroy(self.c_byte_buffer)
+ grpc_byte_buffer_destroy(self.c_byte_buffer)
cdef class SslPemKeyCertPair:
@@ -420,8 +419,7 @@ cdef class Metadata:
# this frees the allocated memory for the grpc_metadata_array (although
# it'd be nice if that were documented somewhere...)
# TODO(atash): document this in the C core
- with nogil:
- grpc_metadata_array_destroy(&self.c_metadata_array)
+ grpc_metadata_array_destroy(&self.c_metadata_array)
def __len__(self):
return self.c_metadata_array.count
@@ -530,8 +528,7 @@ cdef class Operation:
# Python. The remaining one(s) are primitive fields filled in by GRPC core.
# This means that we need to clean up after receive_status_on_client.
if self.c_op.type == GRPC_OP_RECV_STATUS_ON_CLIENT:
- with nogil:
- gpr_free(self._received_status_details)
+ gpr_free(self._received_status_details)
def operation_send_initial_metadata(Metadata metadata, int flags):
cdef Operation op = Operation()
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
index 3e03b6efe1..4f2d51b03f 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
@@ -171,5 +171,4 @@ cdef class Server:
# much but repeatedly release the GIL and wait
while not self.is_shutdown:
time.sleep(0)
- with nogil:
- grpc_server_destroy(self.c_server)
+ grpc_server_destroy(self.c_server)
diff --git a/src/python/grpcio/grpc/_links/__init__.py b/src/python/grpcio/grpc/_links/__init__.py
deleted file mode 100644
index 7086519106..0000000000
--- a/src/python/grpcio/grpc/_links/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio/grpc/_links/_constants.py b/src/python/grpcio/grpc/_links/_constants.py
deleted file mode 100644
index 117fc5a639..0000000000
--- a/src/python/grpcio/grpc/_links/_constants.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Constants for use within this package."""
-
-from grpc._adapter import _intermediary_low
-from grpc.beta import interfaces as beta_interfaces
-
-LOW_STATUS_CODE_TO_HIGH_STATUS_CODE = {
- low: high for low, high in zip(
- _intermediary_low.Code, beta_interfaces.StatusCode)
-}
-
-HIGH_STATUS_CODE_TO_LOW_STATUS_CODE = {
- high: low for low, high in LOW_STATUS_CODE_TO_HIGH_STATUS_CODE.items()
-}
diff --git a/src/python/grpcio/grpc/_links/invocation.py b/src/python/grpcio/grpc/_links/invocation.py
deleted file mode 100644
index 003653e1c8..0000000000
--- a/src/python/grpcio/grpc/_links/invocation.py
+++ /dev/null
@@ -1,453 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""The RPC-invocation-side bridge between RPC Framework and GRPC-on-the-wire."""
-
-import abc
-import enum
-import logging
-import threading
-import time
-
-import six
-
-from grpc._adapter import _intermediary_low
-from grpc._links import _constants
-from grpc.beta import interfaces as beta_interfaces
-from grpc.framework.foundation import activated
-from grpc.framework.foundation import logging_pool
-from grpc.framework.foundation import relay
-from grpc.framework.interfaces.links import links
-
-_IDENTITY = lambda x: x
-
-_STOP = _intermediary_low.Event.Kind.STOP
-_WRITE = _intermediary_low.Event.Kind.WRITE_ACCEPTED
-_COMPLETE = _intermediary_low.Event.Kind.COMPLETE_ACCEPTED
-_READ = _intermediary_low.Event.Kind.READ_ACCEPTED
-_METADATA = _intermediary_low.Event.Kind.METADATA_ACCEPTED
-_FINISH = _intermediary_low.Event.Kind.FINISH
-
-
-@enum.unique
-class _Read(enum.Enum):
- AWAITING_METADATA = 'awaiting metadata'
- READING = 'reading'
- AWAITING_ALLOWANCE = 'awaiting allowance'
- CLOSED = 'closed'
-
-
-@enum.unique
-class _HighWrite(enum.Enum):
- OPEN = 'open'
- CLOSED = 'closed'
-
-
-@enum.unique
-class _LowWrite(enum.Enum):
- OPEN = 'OPEN'
- ACTIVE = 'ACTIVE'
- CLOSED = 'CLOSED'
-
-
-class _Context(beta_interfaces.GRPCInvocationContext):
-
- def __init__(self):
- self._lock = threading.Lock()
- self._disable_next_compression = False
-
- def disable_next_request_compression(self):
- with self._lock:
- self._disable_next_compression = True
-
- def next_compression_disabled(self):
- with self._lock:
- disabled = self._disable_next_compression
- self._disable_next_compression = False
- return disabled
-
-
-class _RPCState(object):
-
- def __init__(
- self, call, request_serializer, response_deserializer, sequence_number,
- read, allowance, high_write, low_write, due, context):
- self.call = call
- self.request_serializer = request_serializer
- self.response_deserializer = response_deserializer
- self.sequence_number = sequence_number
- self.read = read
- self.allowance = allowance
- self.high_write = high_write
- self.low_write = low_write
- self.due = due
- self.context = context
-
-
-def _no_longer_due(kind, rpc_state, key, rpc_states):
- rpc_state.due.remove(kind)
- if not rpc_state.due:
- del rpc_states[key]
-
-
-class _Kernel(object):
-
- def __init__(
- self, channel, host, metadata_transformer, request_serializers,
- response_deserializers, ticket_relay):
- self._lock = threading.Lock()
- self._channel = channel
- self._host = host
- self._metadata_transformer = metadata_transformer
- self._request_serializers = request_serializers
- self._response_deserializers = response_deserializers
- self._relay = ticket_relay
-
- self._completion_queue = None
- self._rpc_states = {}
- self._pool = None
-
- def _on_write_event(self, operation_id, unused_event, rpc_state):
- if rpc_state.high_write is _HighWrite.CLOSED:
- rpc_state.call.complete(operation_id)
- rpc_state.due.add(_COMPLETE)
- rpc_state.due.remove(_WRITE)
- rpc_state.low_write = _LowWrite.CLOSED
- else:
- ticket = links.Ticket(
- operation_id, rpc_state.sequence_number, None, None, None, None, 1,
- None, None, None, None, None, None, None)
- rpc_state.sequence_number += 1
- self._relay.add_value(ticket)
- rpc_state.low_write = _LowWrite.OPEN
- _no_longer_due(_WRITE, rpc_state, operation_id, self._rpc_states)
-
- def _on_read_event(self, operation_id, event, rpc_state):
- if event.bytes is None or _FINISH not in rpc_state.due:
- rpc_state.read = _Read.CLOSED
- _no_longer_due(_READ, rpc_state, operation_id, self._rpc_states)
- else:
- if 0 < rpc_state.allowance:
- rpc_state.allowance -= 1
- rpc_state.call.read(operation_id)
- else:
- rpc_state.read = _Read.AWAITING_ALLOWANCE
- _no_longer_due(_READ, rpc_state, operation_id, self._rpc_states)
- ticket = links.Ticket(
- operation_id, rpc_state.sequence_number, None, None, None, None, None,
- None, rpc_state.response_deserializer(event.bytes), None, None, None,
- None, None)
- rpc_state.sequence_number += 1
- self._relay.add_value(ticket)
-
- def _on_metadata_event(self, operation_id, event, rpc_state):
- if _FINISH in rpc_state.due:
- rpc_state.allowance -= 1
- rpc_state.call.read(operation_id)
- rpc_state.read = _Read.READING
- rpc_state.due.add(_READ)
- rpc_state.due.remove(_METADATA)
- ticket = links.Ticket(
- operation_id, rpc_state.sequence_number, None, None,
- links.Ticket.Subscription.FULL, None, None, event.metadata, None,
- None, None, None, None, None)
- rpc_state.sequence_number += 1
- self._relay.add_value(ticket)
- else:
- _no_longer_due(_METADATA, rpc_state, operation_id, self._rpc_states)
-
- def _on_finish_event(self, operation_id, event, rpc_state):
- _no_longer_due(_FINISH, rpc_state, operation_id, self._rpc_states)
- if event.status.code == _intermediary_low.Code.OK:
- termination = links.Ticket.Termination.COMPLETION
- elif event.status.code == _intermediary_low.Code.CANCELLED:
- termination = links.Ticket.Termination.CANCELLATION
- elif event.status.code == _intermediary_low.Code.DEADLINE_EXCEEDED:
- termination = links.Ticket.Termination.EXPIRATION
- elif event.status.code == _intermediary_low.Code.UNIMPLEMENTED:
- termination = links.Ticket.Termination.REMOTE_FAILURE
- elif event.status.code == _intermediary_low.Code.UNKNOWN:
- termination = links.Ticket.Termination.LOCAL_FAILURE
- else:
- termination = links.Ticket.Termination.TRANSMISSION_FAILURE
- code = _constants.LOW_STATUS_CODE_TO_HIGH_STATUS_CODE[event.status.code]
- ticket = links.Ticket(
- operation_id, rpc_state.sequence_number, None, None, None, None, None,
- None, None, event.metadata, code, event.status.details, termination,
- None)
- rpc_state.sequence_number += 1
- self._relay.add_value(ticket)
-
- def _spin(self, completion_queue):
- while True:
- event = completion_queue.get(None)
- with self._lock:
- rpc_state = self._rpc_states.get(event.tag, None)
- if event.kind is _STOP:
- pass
- elif event.kind is _WRITE:
- self._on_write_event(event.tag, event, rpc_state)
- elif event.kind is _METADATA:
- self._on_metadata_event(event.tag, event, rpc_state)
- elif event.kind is _READ:
- self._on_read_event(event.tag, event, rpc_state)
- elif event.kind is _FINISH:
- self._on_finish_event(event.tag, event, rpc_state)
- elif event.kind is _COMPLETE:
- _no_longer_due(_COMPLETE, rpc_state, event.tag, self._rpc_states)
- else:
- logging.error('Illegal RPC event! %s', (event,))
-
- if self._completion_queue is None and not self._rpc_states:
- completion_queue.stop()
- return
-
- def _invoke(
- self, operation_id, group, method, initial_metadata, payload, termination,
- timeout, allowance, options):
- """Invoke an RPC.
-
- Args:
- operation_id: Any object to be used as an operation ID for the RPC.
- group: The group to which the RPC method belongs.
- method: The RPC method name.
- initial_metadata: The initial metadata object for the RPC.
- payload: A payload object for the RPC or None if no payload was given at
- invocation-time.
- termination: A links.Ticket.Termination value or None indicating whether or
- not more writes will follow from this side of the RPC.
- timeout: A duration of time in seconds to allow for the RPC.
- allowance: The number of payloads (beyond the free first one) that the
- local ticket exchange mate has granted permission to be read.
- options: A beta_interfaces.GRPCCallOptions value or None.
- """
- if termination is links.Ticket.Termination.COMPLETION:
- high_write = _HighWrite.CLOSED
- elif termination is None:
- high_write = _HighWrite.OPEN
- else:
- return
-
- transformed_initial_metadata = self._metadata_transformer(initial_metadata)
- request_serializer = self._request_serializers.get(
- (group, method), _IDENTITY)
- response_deserializer = self._response_deserializers.get(
- (group, method), _IDENTITY)
-
- call = _intermediary_low.Call(
- self._channel, self._completion_queue, '/%s/%s' % (group, method),
- self._host, time.time() + timeout)
- if options is not None and options.credentials is not None:
- call.set_credentials(options.credentials._low_credentials)
- if transformed_initial_metadata is not None:
- for metadata_key, metadata_value in transformed_initial_metadata:
- call.add_metadata(metadata_key, metadata_value)
- call.invoke(self._completion_queue, operation_id, operation_id)
- if payload is None:
- if high_write is _HighWrite.CLOSED:
- call.complete(operation_id)
- low_write = _LowWrite.CLOSED
- due = set((_METADATA, _COMPLETE, _FINISH,))
- else:
- low_write = _LowWrite.OPEN
- due = set((_METADATA, _FINISH,))
- else:
- if options is not None and options.disable_compression:
- flags = _intermediary_low.WriteFlags.WRITE_NO_COMPRESS
- else:
- flags = 0
- call.write(request_serializer(payload), operation_id, flags)
- low_write = _LowWrite.ACTIVE
- due = set((_WRITE, _METADATA, _FINISH,))
- context = _Context()
- self._rpc_states[operation_id] = _RPCState(
- call, request_serializer, response_deserializer, 1,
- _Read.AWAITING_METADATA, 1 if allowance is None else (1 + allowance),
- high_write, low_write, due, context)
- protocol = links.Protocol(links.Protocol.Kind.INVOCATION_CONTEXT, context)
- ticket = links.Ticket(
- operation_id, 0, None, None, None, None, None, None, None, None, None,
- None, None, protocol)
- self._relay.add_value(ticket)
-
- def _advance(self, operation_id, rpc_state, payload, termination, allowance):
- if payload is not None:
- disable_compression = rpc_state.context.next_compression_disabled()
- if disable_compression:
- flags = _intermediary_low.WriteFlags.WRITE_NO_COMPRESS
- else:
- flags = 0
- rpc_state.call.write(
- rpc_state.request_serializer(payload), operation_id, flags)
- rpc_state.low_write = _LowWrite.ACTIVE
- rpc_state.due.add(_WRITE)
-
- if allowance is not None:
- if rpc_state.read is _Read.AWAITING_ALLOWANCE:
- rpc_state.allowance += allowance - 1
- rpc_state.call.read(operation_id)
- rpc_state.read = _Read.READING
- rpc_state.due.add(_READ)
- else:
- rpc_state.allowance += allowance
-
- if termination is links.Ticket.Termination.COMPLETION:
- rpc_state.high_write = _HighWrite.CLOSED
- if rpc_state.low_write is _LowWrite.OPEN:
- rpc_state.call.complete(operation_id)
- rpc_state.due.add(_COMPLETE)
- rpc_state.low_write = _LowWrite.CLOSED
- elif termination is not None:
- rpc_state.call.cancel()
-
- def add_ticket(self, ticket):
- with self._lock:
- if ticket.sequence_number == 0:
- if self._completion_queue is None:
- logging.error('Received invocation ticket %s after stop!', ticket)
- else:
- if (ticket.protocol is not None and
- ticket.protocol.kind is links.Protocol.Kind.CALL_OPTION):
- grpc_call_options = ticket.protocol.value
- else:
- grpc_call_options = None
- self._invoke(
- ticket.operation_id, ticket.group, ticket.method,
- ticket.initial_metadata, ticket.payload, ticket.termination,
- ticket.timeout, ticket.allowance, grpc_call_options)
- else:
- rpc_state = self._rpc_states.get(ticket.operation_id)
- if rpc_state is not None:
- self._advance(
- ticket.operation_id, rpc_state, ticket.payload,
- ticket.termination, ticket.allowance)
-
- def start(self):
- """Starts this object.
-
- This method must be called before attempting to exchange tickets with this
- object.
- """
- with self._lock:
- self._completion_queue = _intermediary_low.CompletionQueue()
- self._pool = logging_pool.pool(1)
- self._pool.submit(self._spin, self._completion_queue)
-
- def stop(self):
- """Stops this object.
-
- This method must be called for proper termination of this object, and no
- attempts to exchange tickets with this object may be made after this method
- has been called.
- """
- with self._lock:
- if not self._rpc_states:
- self._completion_queue.stop()
- self._completion_queue = None
- pool = self._pool
- pool.shutdown(wait=True)
-
-
-class InvocationLink(six.with_metaclass(abc.ABCMeta, links.Link, activated.Activated)):
- """A links.Link for use on the invocation-side of a gRPC connection.
-
- Implementations of this interface are only valid for use when activated.
- """
-
-
-class _InvocationLink(InvocationLink):
-
- def __init__(
- self, channel, host, metadata_transformer, request_serializers,
- response_deserializers):
- self._relay = relay.relay(None)
- self._kernel = _Kernel(
- channel, host,
- _IDENTITY if metadata_transformer is None else metadata_transformer,
- {} if request_serializers is None else request_serializers,
- {} if response_deserializers is None else response_deserializers,
- self._relay)
-
- def _start(self):
- self._relay.start()
- self._kernel.start()
- return self
-
- def _stop(self):
- self._kernel.stop()
- self._relay.stop()
-
- def accept_ticket(self, ticket):
- """See links.Link.accept_ticket for specification."""
- self._kernel.add_ticket(ticket)
-
- def join_link(self, link):
- """See links.Link.join_link for specification."""
- self._relay.set_behavior(link.accept_ticket)
-
- def __enter__(self):
- """See activated.Activated.__enter__ for specification."""
- return self._start()
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- """See activated.Activated.__exit__ for specification."""
- self._stop()
- return False
-
- def start(self):
- """See activated.Activated.start for specification."""
- return self._start()
-
- def stop(self):
- """See activated.Activated.stop for specification."""
- self._stop()
-
-
-def invocation_link(
- channel, host, metadata_transformer, request_serializers,
- response_deserializers):
- """Creates an InvocationLink.
-
- Args:
- channel: An _intermediary_low.Channel for use by the link.
- host: The host to specify when invoking RPCs.
- metadata_transformer: A callable that takes an invocation-side initial
- metadata value and returns another metadata value to send in its place.
- May be None.
- request_serializers: A dict from group-method pair to request object
- serialization behavior.
- response_deserializers: A dict from group-method pair to response object
- deserialization behavior.
-
- Returns:
- An InvocationLink.
- """
- return _InvocationLink(
- channel, host, metadata_transformer, request_serializers,
- response_deserializers)
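
# A minimal sketch of the drain-and-dispatch pattern used by the deleted
# _Kernel._spin above: one pooled thread pulls events off a completion queue,
# handles each by kind, removes that kind from the per-RPC "due" bookkeeping,
# and stops once nothing remains due. Kind and Spinner are hypothetical
# stand-ins (a plain queue.Queue replaces the low-level completion queue),
# not grpcio APIs.
import enum
import queue
import threading


class Kind(enum.Enum):
    WRITE = 'write accepted'
    READ = 'read accepted'
    FINISH = 'finish'


class Spinner(object):
    """Drains posted events on one thread until none remain due."""

    def __init__(self, due):
        self._lock = threading.Lock()
        self._events = queue.Queue()
        self._due = set(due)

    def post(self, kind):
        self._events.put(kind)

    def spin(self):
        while True:
            kind = self._events.get()
            with self._lock:
                # The real kernel dispatches to its _on_<kind>_event handlers here.
                self._due.discard(kind)
                if not self._due:
                    return


spinner = Spinner(due=(Kind.WRITE, Kind.FINISH))
worker = threading.Thread(target=spinner.spin)
worker.start()
spinner.post(Kind.WRITE)
spinner.post(Kind.FINISH)
worker.join()
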
diff --git a/src/python/grpcio/grpc/_links/service.py b/src/python/grpcio/grpc/_links/service.py
deleted file mode 100644
index 5fc4994ca0..0000000000
--- a/src/python/grpcio/grpc/_links/service.py
+++ /dev/null
@@ -1,509 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""The RPC-service-side bridge between RPC Framework and GRPC-on-the-wire."""
-
-import abc
-import enum
-import logging
-import threading
-import six
-import time
-
-from grpc._adapter import _intermediary_low
-from grpc._links import _constants
-from grpc.beta import interfaces as beta_interfaces
-from grpc.framework.foundation import logging_pool
-from grpc.framework.foundation import relay
-from grpc.framework.interfaces.links import links
-
-_IDENTITY = lambda x: x
-
-_TERMINATION_KIND_TO_CODE = {
- links.Ticket.Termination.COMPLETION: _intermediary_low.Code.OK,
- links.Ticket.Termination.CANCELLATION: _intermediary_low.Code.CANCELLED,
- links.Ticket.Termination.EXPIRATION:
- _intermediary_low.Code.DEADLINE_EXCEEDED,
- links.Ticket.Termination.SHUTDOWN: _intermediary_low.Code.UNAVAILABLE,
- links.Ticket.Termination.RECEPTION_FAILURE: _intermediary_low.Code.INTERNAL,
- links.Ticket.Termination.TRANSMISSION_FAILURE:
- _intermediary_low.Code.INTERNAL,
- links.Ticket.Termination.LOCAL_FAILURE: _intermediary_low.Code.UNKNOWN,
- links.Ticket.Termination.REMOTE_FAILURE: _intermediary_low.Code.UNKNOWN,
-}
-
-_STOP = _intermediary_low.Event.Kind.STOP
-_WRITE = _intermediary_low.Event.Kind.WRITE_ACCEPTED
-_COMPLETE = _intermediary_low.Event.Kind.COMPLETE_ACCEPTED
-_SERVICE = _intermediary_low.Event.Kind.SERVICE_ACCEPTED
-_READ = _intermediary_low.Event.Kind.READ_ACCEPTED
-_FINISH = _intermediary_low.Event.Kind.FINISH
-
-
-@enum.unique
-class _Read(enum.Enum):
- READING = 'reading'
- # TODO(issue 2916): This state will again be necessary after eliminating the
- # "early_read" field of _RPCState and going back to only reading when granted
- # allowance to read.
- # AWAITING_ALLOWANCE = 'awaiting allowance'
- CLOSED = 'closed'
-
-
-@enum.unique
-class _HighWrite(enum.Enum):
- OPEN = 'open'
- CLOSED = 'closed'
-
-
-@enum.unique
-class _LowWrite(enum.Enum):
- """The possible categories of low-level write state."""
-
- OPEN = 'OPEN'
- ACTIVE = 'ACTIVE'
- CLOSED = 'CLOSED'
-
-
-class _Context(beta_interfaces.GRPCServicerContext):
-
- def __init__(self, call):
- self._lock = threading.Lock()
- self._call = call
- self._disable_next_compression = False
-
- def peer(self):
- with self._lock:
- return self._call.peer()
-
- def disable_next_response_compression(self):
- with self._lock:
- self._disable_next_compression = True
-
- def next_compression_disabled(self):
- with self._lock:
- disabled = self._disable_next_compression
- self._disable_next_compression = False
- return disabled
-
-
-class _RPCState(object):
-
- def __init__(
- self, request_deserializer, response_serializer, sequence_number, read,
- early_read, allowance, high_write, low_write, premetadataed,
- terminal_metadata, code, message, due, context):
- self.request_deserializer = request_deserializer
- self.response_serializer = response_serializer
- self.sequence_number = sequence_number
- self.read = read
- # TODO(issue 2916): Eliminate this by eliminating the necessity of calling
- # call.read just to advance the RPC.
- self.early_read = early_read # A raw (not deserialized) read.
- self.allowance = allowance
- self.high_write = high_write
- self.low_write = low_write
- self.premetadataed = premetadataed
- self.terminal_metadata = terminal_metadata
- self.code = code
- self.message = message
- self.due = due
- self.context = context
-
-
-def _no_longer_due(kind, rpc_state, key, rpc_states):
- rpc_state.due.remove(kind)
- if not rpc_state.due:
- del rpc_states[key]
-
-
-def _metadatafy(call, metadata):
- for metadata_key, metadata_value in metadata:
- call.add_metadata(metadata_key, metadata_value)
-
-
-def _status(termination_kind, high_code, details):
- low_details = b'' if details is None else details
- if high_code is None:
- low_code = _TERMINATION_KIND_TO_CODE[termination_kind]
- else:
- low_code = _constants.HIGH_STATUS_CODE_TO_LOW_STATUS_CODE[high_code]
- return _intermediary_low.Status(low_code, low_details)
-
-
-class _Kernel(object):
-
- def __init__(self, request_deserializers, response_serializers, ticket_relay):
- self._lock = threading.Lock()
- self._request_deserializers = request_deserializers
- self._response_serializers = response_serializers
- self._relay = ticket_relay
-
- self._completion_queue = None
- self._due = set()
- self._server = None
- self._rpc_states = {}
- self._pool = None
-
- def _on_service_acceptance_event(self, event, server):
- server.service(None)
-
- service_acceptance = event.service_acceptance
- call = service_acceptance.call
- call.accept(self._completion_queue, call)
- try:
- service_method = service_acceptance.method
- if six.PY3:
- service_method = service_method.decode('latin1')
- group, method = service_method.split('/')[1:3]
- except ValueError:
- logging.info('Illegal path "%s"!', service_acceptance.method)
- return
- request_deserializer = self._request_deserializers.get(
- (group, method), _IDENTITY)
- response_serializer = self._response_serializers.get(
- (group, method), _IDENTITY)
-
- call.read(call)
- context = _Context(call)
- self._rpc_states[call] = _RPCState(
- request_deserializer, response_serializer, 1, _Read.READING, None, 1,
- _HighWrite.OPEN, _LowWrite.OPEN, False, None, None, None,
- set((_READ, _FINISH,)), context)
- protocol = links.Protocol(links.Protocol.Kind.SERVICER_CONTEXT, context)
- ticket = links.Ticket(
- call, 0, group, method, links.Ticket.Subscription.FULL,
- service_acceptance.deadline - time.time(), None, event.metadata, None,
- None, None, None, None, protocol)
- self._relay.add_value(ticket)
-
- def _on_read_event(self, event):
- call = event.tag
- rpc_state = self._rpc_states[call]
-
- if event.bytes is None:
- rpc_state.read = _Read.CLOSED
- payload = None
- termination = links.Ticket.Termination.COMPLETION
- _no_longer_due(_READ, rpc_state, call, self._rpc_states)
- else:
- if 0 < rpc_state.allowance:
- payload = rpc_state.request_deserializer(event.bytes)
- termination = None
- rpc_state.allowance -= 1
- call.read(call)
- else:
- rpc_state.early_read = event.bytes
- _no_longer_due(_READ, rpc_state, call, self._rpc_states)
- return
- # TODO(issue 2916): Instead of returning:
- # rpc_state.read = _Read.AWAITING_ALLOWANCE
- ticket = links.Ticket(
- call, rpc_state.sequence_number, None, None, None, None, None, None,
- payload, None, None, None, termination, None)
- rpc_state.sequence_number += 1
- self._relay.add_value(ticket)
-
- def _on_write_event(self, event):
- call = event.tag
- rpc_state = self._rpc_states[call]
-
- if rpc_state.high_write is _HighWrite.CLOSED:
- if rpc_state.terminal_metadata is not None:
- _metadatafy(call, rpc_state.terminal_metadata)
- status = _status(
- links.Ticket.Termination.COMPLETION, rpc_state.code,
- rpc_state.message)
- call.status(status, call)
- rpc_state.low_write = _LowWrite.CLOSED
- rpc_state.due.add(_COMPLETE)
- rpc_state.due.remove(_WRITE)
- else:
- ticket = links.Ticket(
- call, rpc_state.sequence_number, None, None, None, None, 1, None,
- None, None, None, None, None, None)
- rpc_state.sequence_number += 1
- self._relay.add_value(ticket)
- rpc_state.low_write = _LowWrite.OPEN
- _no_longer_due(_WRITE, rpc_state, call, self._rpc_states)
-
- def _on_finish_event(self, event):
- call = event.tag
- rpc_state = self._rpc_states[call]
- _no_longer_due(_FINISH, rpc_state, call, self._rpc_states)
- code = event.status.code
- if code == _intermediary_low.Code.OK:
- return
-
- if code == _intermediary_low.Code.CANCELLED:
- termination = links.Ticket.Termination.CANCELLATION
- elif code == _intermediary_low.Code.DEADLINE_EXCEEDED:
- termination = links.Ticket.Termination.EXPIRATION
- else:
- termination = links.Ticket.Termination.TRANSMISSION_FAILURE
- ticket = links.Ticket(
- call, rpc_state.sequence_number, None, None, None, None, None, None,
- None, None, None, None, termination, None)
- rpc_state.sequence_number += 1
- self._relay.add_value(ticket)
-
- def _spin(self, completion_queue, server):
- while True:
- event = completion_queue.get(None)
- with self._lock:
- if event.kind is _STOP:
- self._due.remove(_STOP)
- elif event.kind is _READ:
- self._on_read_event(event)
- elif event.kind is _WRITE:
- self._on_write_event(event)
- elif event.kind is _COMPLETE:
- _no_longer_due(
- _COMPLETE, self._rpc_states.get(event.tag), event.tag,
- self._rpc_states)
- elif event.kind is _intermediary_low.Event.Kind.FINISH:
- self._on_finish_event(event)
- elif event.kind is _SERVICE:
- if self._server is None:
- self._due.remove(_SERVICE)
- else:
- self._on_service_acceptance_event(event, server)
- else:
- logging.error('Illegal event! %s', (event,))
-
- if not self._due and not self._rpc_states:
- completion_queue.stop()
- return
-
- def add_ticket(self, ticket):
- with self._lock:
- call = ticket.operation_id
- rpc_state = self._rpc_states.get(call)
- if rpc_state is None:
- return
-
- if ticket.initial_metadata is not None:
- _metadatafy(call, ticket.initial_metadata)
- call.premetadata()
- rpc_state.premetadataed = True
- elif not rpc_state.premetadataed:
- if (ticket.terminal_metadata is not None or
- ticket.payload is not None or
- ticket.termination is not None or
- ticket.code is not None or
- ticket.message is not None):
- call.premetadata()
- rpc_state.premetadataed = True
-
- if ticket.allowance is not None:
- if rpc_state.early_read is None:
- rpc_state.allowance += ticket.allowance
- else:
- payload = rpc_state.request_deserializer(rpc_state.early_read)
- rpc_state.allowance += ticket.allowance - 1
- rpc_state.early_read = None
- if rpc_state.read is _Read.READING:
- call.read(call)
- rpc_state.due.add(_READ)
- termination = None
- else:
- termination = links.Ticket.Termination.COMPLETION
- early_read_ticket = links.Ticket(
- call, rpc_state.sequence_number, None, None, None, None, None,
- None, payload, None, None, None, termination, None)
- rpc_state.sequence_number += 1
- self._relay.add_value(early_read_ticket)
-
- if ticket.payload is not None:
- disable_compression = rpc_state.context.next_compression_disabled()
- if disable_compression:
- flags = _intermediary_low.WriteFlags.WRITE_NO_COMPRESS
- else:
- flags = 0
- call.write(rpc_state.response_serializer(ticket.payload), call, flags)
- rpc_state.due.add(_WRITE)
- rpc_state.low_write = _LowWrite.ACTIVE
-
- if ticket.terminal_metadata is not None:
- rpc_state.terminal_metadata = ticket.terminal_metadata
- if ticket.code is not None:
- rpc_state.code = ticket.code
- if ticket.message is not None:
- rpc_state.message = ticket.message
-
- if ticket.termination is links.Ticket.Termination.COMPLETION:
- rpc_state.high_write = _HighWrite.CLOSED
- if rpc_state.low_write is _LowWrite.OPEN:
- if rpc_state.terminal_metadata is not None:
- _metadatafy(call, rpc_state.terminal_metadata)
- status = _status(
- links.Ticket.Termination.COMPLETION, rpc_state.code,
- rpc_state.message)
- call.status(status, call)
- rpc_state.due.add(_COMPLETE)
- rpc_state.low_write = _LowWrite.CLOSED
- elif ticket.termination is not None:
- if rpc_state.terminal_metadata is not None:
- _metadatafy(call, rpc_state.terminal_metadata)
- status = _status(
- ticket.termination, rpc_state.code, rpc_state.message)
- call.status(status, call)
- rpc_state.due.add(_COMPLETE)
-
- def add_port(self, address, server_credentials):
- with self._lock:
- if self._server is None:
- self._completion_queue = _intermediary_low.CompletionQueue()
- self._server = _intermediary_low.Server(self._completion_queue)
- if server_credentials is None:
- return self._server.add_http2_addr(address)
- else:
- return self._server.add_secure_http2_addr(address, server_credentials)
-
- def start(self):
- with self._lock:
- if self._server is None:
- self._completion_queue = _intermediary_low.CompletionQueue()
- self._server = _intermediary_low.Server(self._completion_queue)
- self._pool = logging_pool.pool(1)
- self._pool.submit(self._spin, self._completion_queue, self._server)
- self._server.start()
- self._server.service(None)
- self._due.add(_SERVICE)
-
- def begin_stop(self):
- with self._lock:
- self._server.stop()
- self._due.add(_STOP)
- self._server = None
-
- def end_stop(self):
- with self._lock:
- pool = self._pool
- pool.shutdown(wait=True)
-
-
-class ServiceLink(links.Link):
- """A links.Link for use on the service-side of a gRPC connection.
-
- Implementations of this interface are only valid for use between calls to
- their start method and one of their stop methods.
- """
-
- @abc.abstractmethod
- def add_port(self, address, server_credentials):
- """Adds a port on which to service RPCs after this link has been started.
-
- Args:
- address: The address on which to service RPCs. A port number of zero
- requests that a port number be selected and used automatically.
- server_credentials: An _intermediary_low.ServerCredentials object, or
- None for insecure service.
-
- Returns:
- An integer port on which RPCs will be serviced after this link has been
- started. This is typically the same number as the port number contained
- in the passed address, but will likely be different if the port number
- contained in the passed address was zero.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def start(self):
- """Starts this object.
-
- This method must be called before attempting to use this Link in ticket
- exchange.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def begin_stop(self):
- """Indicate imminent link stop and immediate rejection of new RPCs.
-
- New RPCs will be rejected as soon as this method is called, but ongoing RPCs
- will be allowed to continue until they terminate. This method does not
- block.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def end_stop(self):
- """Finishes stopping this link.
-
- begin_stop must have been called exactly once before calling this method.
-
- All in-progress RPCs will be terminated immediately.
- """
- raise NotImplementedError()
-
-
-class _ServiceLink(ServiceLink):
-
- def __init__(self, request_deserializers, response_serializers):
- self._relay = relay.relay(None)
- self._kernel = _Kernel(
- {} if request_deserializers is None else request_deserializers,
- {} if response_serializers is None else response_serializers,
- self._relay)
-
- def accept_ticket(self, ticket):
- self._kernel.add_ticket(ticket)
-
- def join_link(self, link):
- self._relay.set_behavior(link.accept_ticket)
-
- def add_port(self, address, server_credentials):
- return self._kernel.add_port(address, server_credentials)
-
- def start(self):
- self._relay.start()
- return self._kernel.start()
-
- def begin_stop(self):
- self._kernel.begin_stop()
-
- def end_stop(self):
- self._kernel.end_stop()
- self._relay.stop()
-
-
-def service_link(request_deserializers, response_serializers):
- """Creates a ServiceLink.
-
- Args:
- request_deserializers: A dict from group-method pair to request object
- deserialization behavior.
- response_serializers: A dict from group-method pair to response object
- serialization behavior.
-
- Returns:
- A ServiceLink.
- """
- return _ServiceLink(request_deserializers, response_serializers)
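
# A small sketch of how the deleted service link resolves the status it
# reports for an RPC: an explicit high-level code wins when one was supplied,
# otherwise the termination kind is mapped through a fixed table (compare
# _status and _TERMINATION_KIND_TO_CODE above). Code, Termination and Status
# below are hypothetical stand-ins for the _intermediary_low and links types.
import collections
import enum


class Code(enum.Enum):
    OK = 0
    CANCELLED = 1
    UNKNOWN = 2


class Termination(enum.Enum):
    COMPLETION = 'completion'
    CANCELLATION = 'cancellation'
    LOCAL_FAILURE = 'local failure'


_TERMINATION_TO_CODE = {
    Termination.COMPLETION: Code.OK,
    Termination.CANCELLATION: Code.CANCELLED,
    Termination.LOCAL_FAILURE: Code.UNKNOWN,
}

Status = collections.namedtuple('Status', ('code', 'details'))


def status(termination, explicit_code, details):
    """Prefers the explicit code; otherwise maps the termination kind."""
    if explicit_code is None:
        code = _TERMINATION_TO_CODE[termination]
    else:
        code = explicit_code
    return Status(code, b'' if details is None else details)


assert status(Termination.COMPLETION, None, None) == Status(Code.OK, b'')
assert status(Termination.CANCELLATION, Code.UNKNOWN, b'boom').code is Code.UNKNOWN
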
diff --git a/src/python/grpcio/grpc/beta/_server.py b/src/python/grpcio/grpc/beta/_server.py
deleted file mode 100644
index eb0aadb42f..0000000000
--- a/src/python/grpcio/grpc/beta/_server.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Beta API server implementation."""
-
-import threading
-
-from grpc._links import service
-from grpc.beta import interfaces
-from grpc.framework.core import implementations as _core_implementations
-from grpc.framework.crust import implementations as _crust_implementations
-from grpc.framework.foundation import logging_pool
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.links import utilities
-
-_DEFAULT_POOL_SIZE = 8
-_DEFAULT_TIMEOUT = 300
-_MAXIMUM_TIMEOUT = 24 * 60 * 60
-
-
-def _set_event():
- event = threading.Event()
- event.set()
- return event
-
-
-class _GRPCServicer(base.Servicer):
-
- def __init__(self, delegate):
- self._delegate = delegate
-
- def service(self, group, method, context, output_operator):
- try:
- return self._delegate.service(group, method, context, output_operator)
- except base.NoSuchMethodError as e:
- if e.code is None and e.details is None:
- raise base.NoSuchMethodError(
- interfaces.StatusCode.UNIMPLEMENTED,
- 'Method "%s" of service "%s" not implemented!' % (method, group))
- else:
- raise
-
-
-class _Server(interfaces.Server):
-
- def __init__(
- self, implementations, multi_implementation, pool, pool_size,
- default_timeout, maximum_timeout, grpc_link):
- self._lock = threading.Lock()
- self._implementations = implementations
- self._multi_implementation = multi_implementation
- self._customer_pool = pool
- self._pool_size = pool_size
- self._default_timeout = default_timeout
- self._maximum_timeout = maximum_timeout
- self._grpc_link = grpc_link
-
- self._end_link = None
- self._stop_events = None
- self._pool = None
-
- def _start(self):
- with self._lock:
- if self._end_link is not None:
- raise ValueError('Cannot start already-started server!')
-
- if self._customer_pool is None:
- self._pool = logging_pool.pool(self._pool_size)
- assembly_pool = self._pool
- else:
- assembly_pool = self._customer_pool
-
- servicer = _GRPCServicer(
- _crust_implementations.servicer(
- self._implementations, self._multi_implementation, assembly_pool))
-
- self._end_link = _core_implementations.service_end_link(
- servicer, self._default_timeout, self._maximum_timeout)
-
- self._grpc_link.join_link(self._end_link)
- self._end_link.join_link(self._grpc_link)
- self._grpc_link.start()
- self._end_link.start()
-
- def _dissociate_links_and_shut_down_pool(self):
- self._grpc_link.end_stop()
- self._grpc_link.join_link(utilities.NULL_LINK)
- self._end_link.join_link(utilities.NULL_LINK)
- self._end_link = None
- if self._pool is not None:
- self._pool.shutdown(wait=True)
- self._pool = None
-
- def _stop_stopping(self):
- self._dissociate_links_and_shut_down_pool()
- for stop_event in self._stop_events:
- stop_event.set()
- self._stop_events = None
-
- def _stop_started(self):
- self._grpc_link.begin_stop()
- self._end_link.stop(0).wait()
- self._dissociate_links_and_shut_down_pool()
-
- def _foreign_thread_stop(self, end_stop_event, stop_events):
- end_stop_event.wait()
- with self._lock:
- if self._stop_events is stop_events:
- self._stop_stopping()
-
- def _schedule_stop(self, grace):
- with self._lock:
- if self._end_link is None:
- return _set_event()
- server_stop_event = threading.Event()
- if self._stop_events is None:
- self._stop_events = [server_stop_event]
- self._grpc_link.begin_stop()
- else:
- self._stop_events.append(server_stop_event)
- end_stop_event = self._end_link.stop(grace)
- end_stop_thread = threading.Thread(
- target=self._foreign_thread_stop,
- args=(end_stop_event, self._stop_events))
- end_stop_thread.start()
- return server_stop_event
-
- def _stop_now(self):
- with self._lock:
- if self._end_link is not None:
- if self._stop_events is None:
- self._stop_started()
- else:
- self._stop_stopping()
-
- def add_insecure_port(self, address):
- with self._lock:
- if self._end_link is None:
- return self._grpc_link.add_port(address, None)
- else:
- raise ValueError('Can\'t add port to serving server!')
-
- def add_secure_port(self, address, server_credentials):
- with self._lock:
- if self._end_link is None:
- return self._grpc_link.add_port(
- address, server_credentials._low_credentials) # pylint: disable=protected-access
- else:
- raise ValueError('Can\'t add port to serving server!')
-
- def start(self):
- self._start()
-
- def stop(self, grace):
- if 0 < grace:
- return self._schedule_stop(grace)
- else:
- self._stop_now()
- return _set_event()
-
- def __enter__(self):
- self._start()
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self._stop_now()
- return False
-
- def __del__(self):
- self._stop_now()
-
-
-def server(
- implementations, multi_implementation, request_deserializers,
- response_serializers, thread_pool, thread_pool_size, default_timeout,
- maximum_timeout):
- grpc_link = service.service_link(request_deserializers, response_serializers)
- return _Server(
- implementations, multi_implementation, thread_pool,
- _DEFAULT_POOL_SIZE if thread_pool_size is None else thread_pool_size,
- _DEFAULT_TIMEOUT if default_timeout is None else default_timeout,
- _MAXIMUM_TIMEOUT if maximum_timeout is None else maximum_timeout,
- grpc_link)
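
# A rough sketch of the stop(grace) contract implemented by the deleted beta
# server: the caller always gets back a threading.Event, set immediately when
# grace is zero and set later by a watcher when a positive grace is given.
# GracefulStopper and its _shutdown hook are hypothetical simplifications; the
# real server also coordinates with the end link and its thread pool.
import threading


class GracefulStopper(object):

    def __init__(self):
        self._lock = threading.Lock()
        self.stopped = False

    def _shutdown(self):
        with self._lock:
            self.stopped = True

    def stop(self, grace):
        event = threading.Event()
        if grace <= 0:
            self._shutdown()
            event.set()
            return event

        def expire():
            # The real server lets in-flight RPCs finish before forcing shutdown.
            self._shutdown()
            event.set()

        threading.Timer(grace, expire).start()
        return event


stopper = GracefulStopper()
stopper.stop(0.01).wait()
assert stopper.stopped
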
diff --git a/src/python/grpcio/grpc/beta/_stub.py b/src/python/grpcio/grpc/beta/_stub.py
deleted file mode 100644
index 2af019309a..0000000000
--- a/src/python/grpcio/grpc/beta/_stub.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Beta API stub implementation."""
-
-import threading
-
-from grpc._links import invocation
-from grpc.framework.core import implementations as _core_implementations
-from grpc.framework.crust import implementations as _crust_implementations
-from grpc.framework.foundation import logging_pool
-from grpc.framework.interfaces.links import utilities
-
-_DEFAULT_POOL_SIZE = 6
-
-
-class _AutoIntermediary(object):
-
- def __init__(self, up, down, delegate):
- self._lock = threading.Lock()
- self._up = up
- self._down = down
- self._in_context = False
- self._delegate = delegate
-
- def __getattr__(self, attr):
- with self._lock:
- if self._delegate is None:
- raise AttributeError('No useful attributes out of context!')
- else:
- return getattr(self._delegate, attr)
-
- def __enter__(self):
- with self._lock:
- if self._in_context:
- raise ValueError('Already in context!')
- elif self._delegate is None:
- self._delegate = self._up()
- self._in_context = True
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- with self._lock:
- if not self._in_context:
- raise ValueError('Not in context!')
- self._down()
- self._in_context = False
- self._delegate = None
- return False
-
- def __del__(self):
- with self._lock:
- if self._delegate is not None:
- self._down()
- self._delegate = None
-
-
-class _StubAssemblyManager(object):
-
- def __init__(
- self, thread_pool, thread_pool_size, end_link, grpc_link, stub_creator):
- self._thread_pool = thread_pool
- self._pool_size = thread_pool_size
- self._end_link = end_link
- self._grpc_link = grpc_link
- self._stub_creator = stub_creator
- self._own_pool = None
-
- def up(self):
- if self._thread_pool is None:
- self._own_pool = logging_pool.pool(
- _DEFAULT_POOL_SIZE if self._pool_size is None else self._pool_size)
- assembly_pool = self._own_pool
- else:
- assembly_pool = self._thread_pool
- self._end_link.join_link(self._grpc_link)
- self._grpc_link.join_link(self._end_link)
- self._end_link.start()
- self._grpc_link.start()
- return self._stub_creator(self._end_link, assembly_pool)
-
- def down(self):
- self._end_link.stop(0).wait()
- self._grpc_link.stop()
- self._end_link.join_link(utilities.NULL_LINK)
- self._grpc_link.join_link(utilities.NULL_LINK)
- if self._own_pool is not None:
- self._own_pool.shutdown(wait=True)
- self._own_pool = None
-
-
-def _assemble(
- channel, host, metadata_transformer, request_serializers,
- response_deserializers, thread_pool, thread_pool_size, stub_creator):
- end_link = _core_implementations.invocation_end_link()
- grpc_link = invocation.invocation_link(
- channel, host, metadata_transformer, request_serializers,
- response_deserializers)
- stub_assembly_manager = _StubAssemblyManager(
- thread_pool, thread_pool_size, end_link, grpc_link, stub_creator)
- stub = stub_assembly_manager.up()
- return _AutoIntermediary(
- stub_assembly_manager.up, stub_assembly_manager.down, stub)
-
-
-def _dynamic_stub_creator(service, cardinalities):
- def create_dynamic_stub(end_link, invocation_pool):
- return _crust_implementations.dynamic_stub(
- end_link, service, cardinalities, invocation_pool)
- return create_dynamic_stub
-
-
-def generic_stub(
- channel, host, metadata_transformer, request_serializers,
- response_deserializers, thread_pool, thread_pool_size):
- return _assemble(
- channel, host, metadata_transformer, request_serializers,
- response_deserializers, thread_pool, thread_pool_size,
- _crust_implementations.generic_stub)
-
-
-def dynamic_stub(
- channel, host, service, cardinalities, metadata_transformer,
- request_serializers, response_deserializers, thread_pool,
- thread_pool_size):
- return _assemble(
- channel, host, metadata_transformer, request_serializers,
- response_deserializers, thread_pool, thread_pool_size,
- _dynamic_stub_creator(service, cardinalities))
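
# A small sketch of the context-manager shape used by the deleted
# _AutoIntermediary: an "up" callable builds the delegate on __enter__, a
# "down" callable tears it down on __exit__, and attribute access is forwarded
# only while inside the context. AutoDelegate and the lambdas below are
# hypothetical; the real class also guards re-entry and cleans up in __del__.
import threading


class AutoDelegate(object):

    def __init__(self, up, down):
        self._lock = threading.Lock()
        self._up = up
        self._down = down
        self._delegate = None

    def __getattr__(self, attr):
        with self._lock:
            if self._delegate is None:
                raise AttributeError('No useful attributes out of context!')
            return getattr(self._delegate, attr)

    def __enter__(self):
        with self._lock:
            self._delegate = self._up()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        with self._lock:
            self._down()
            self._delegate = None
        return False


with AutoDelegate(up=lambda: 'stub', down=lambda: None) as wrapper:
    assert wrapper.upper() == 'STUB'
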
diff --git a/src/python/grpcio/grpc/beta/implementations.py b/src/python/grpcio/grpc/beta/implementations.py
index 4ae6e7d675..ab25fd5eec 100644
--- a/src/python/grpcio/grpc/beta/implementations.py
+++ b/src/python/grpcio/grpc/beta/implementations.py
@@ -37,7 +37,6 @@ import threading # pylint: disable=unused-import
# cardinality and face are referenced from specification in this module.
import grpc
from grpc import _auth
-from grpc._adapter import _types
from grpc.beta import _client_adaptations
from grpc.beta import _server_adaptations
from grpc.beta import interfaces
diff --git a/src/python/grpcio/grpc/framework/core/__init__.py b/src/python/grpcio/grpc/framework/core/__init__.py
deleted file mode 100644
index 7086519106..0000000000
--- a/src/python/grpcio/grpc/framework/core/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio/grpc/framework/core/_constants.py b/src/python/grpcio/grpc/framework/core/_constants.py
deleted file mode 100644
index 0f47cb48e0..0000000000
--- a/src/python/grpcio/grpc/framework/core/_constants.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Private constants for the package."""
-
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.links import links
-
-TICKET_SUBSCRIPTION_FOR_BASE_SUBSCRIPTION_KIND = {
- base.Subscription.Kind.NONE: links.Ticket.Subscription.NONE,
- base.Subscription.Kind.TERMINATION_ONLY:
- links.Ticket.Subscription.TERMINATION,
- base.Subscription.Kind.FULL: links.Ticket.Subscription.FULL,
- }
-
-# Mapping from abortive operation outcome to ticket termination to be
-# sent to the other side of the operation, or None to indicate that no
-# ticket should be sent to the other side in the event of such an
-# outcome.
-ABORTION_OUTCOME_TO_TICKET_TERMINATION = {
- base.Outcome.Kind.CANCELLED: links.Ticket.Termination.CANCELLATION,
- base.Outcome.Kind.EXPIRED: links.Ticket.Termination.EXPIRATION,
- base.Outcome.Kind.LOCAL_SHUTDOWN: links.Ticket.Termination.SHUTDOWN,
- base.Outcome.Kind.REMOTE_SHUTDOWN: None,
- base.Outcome.Kind.RECEPTION_FAILURE:
- links.Ticket.Termination.RECEPTION_FAILURE,
- base.Outcome.Kind.TRANSMISSION_FAILURE: None,
- base.Outcome.Kind.LOCAL_FAILURE: links.Ticket.Termination.LOCAL_FAILURE,
- base.Outcome.Kind.REMOTE_FAILURE: links.Ticket.Termination.REMOTE_FAILURE,
-}
-
-INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Core) internal error! )-:'
-TERMINATION_CALLBACK_EXCEPTION_LOG_MESSAGE = (
- 'Exception calling termination callback!')
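
# A brief sketch of how a table like ABORTION_OUTCOME_TO_TICKET_TERMINATION is
# consumed: the outcome kind is looked up, and a termination ticket is sent
# only when the mapped value is not None. The enums and send hook here are
# hypothetical stand-ins, not the base/links types themselves.
import enum


class OutcomeKind(enum.Enum):
    CANCELLED = 'cancelled'
    TRANSMISSION_FAILURE = 'transmission failure'


class TicketTermination(enum.Enum):
    CANCELLATION = 'cancellation'


_OUTCOME_TO_TERMINATION = {
    OutcomeKind.CANCELLED: TicketTermination.CANCELLATION,
    OutcomeKind.TRANSMISSION_FAILURE: None,
}


def maybe_send_termination(outcome_kind, send):
    termination = _OUTCOME_TO_TERMINATION[outcome_kind]
    if termination is not None:
        send(termination)


sent = []
maybe_send_termination(OutcomeKind.CANCELLED, sent.append)
maybe_send_termination(OutcomeKind.TRANSMISSION_FAILURE, sent.append)
assert sent == [TicketTermination.CANCELLATION]
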
diff --git a/src/python/grpcio/grpc/framework/core/_context.py b/src/python/grpcio/grpc/framework/core/_context.py
deleted file mode 100644
index a346e9d478..0000000000
--- a/src/python/grpcio/grpc/framework/core/_context.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for operation context."""
-
-import time
-
-# _interfaces is referenced from specification in this module.
-from grpc.framework.core import _interfaces # pylint: disable=unused-import
-from grpc.framework.core import _utilities
-from grpc.framework.interfaces.base import base
-
-
-class OperationContext(base.OperationContext):
- """An implementation of interfaces.OperationContext."""
-
- def __init__(
- self, lock, termination_manager, transmission_manager,
- expiration_manager):
- """Constructor.
-
- Args:
- lock: The operation-wide lock.
- termination_manager: The _interfaces.TerminationManager for the operation.
- transmission_manager: The _interfaces.TransmissionManager for the
- operation.
- expiration_manager: The _interfaces.ExpirationManager for the operation.
- """
- self._lock = lock
- self._termination_manager = termination_manager
- self._transmission_manager = transmission_manager
- self._expiration_manager = expiration_manager
-
- def _abort(self, outcome_kind):
- with self._lock:
- if self._termination_manager.outcome is None:
- outcome = _utilities.Outcome(outcome_kind, None, None)
- self._termination_manager.abort(outcome)
- self._transmission_manager.abort(outcome)
- self._expiration_manager.terminate()
-
- def outcome(self):
- """See base.OperationContext.outcome for specification."""
- with self._lock:
- return self._termination_manager.outcome
-
- def add_termination_callback(self, callback):
- """See base.OperationContext.add_termination_callback."""
- with self._lock:
- if self._termination_manager.outcome is None:
- self._termination_manager.add_callback(callback)
- return None
- else:
- return self._termination_manager.outcome
-
- def time_remaining(self):
- """See base.OperationContext.time_remaining for specification."""
- with self._lock:
- deadline = self._expiration_manager.deadline()
- return max(0.0, deadline - time.time())
-
- def cancel(self):
- """See base.OperationContext.cancel for specification."""
- self._abort(base.Outcome.Kind.CANCELLED)
-
- def fail(self, exception):
- """See base.OperationContext.fail for specification."""
- self._abort(base.Outcome.Kind.LOCAL_FAILURE)
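
# The deleted OperationContext derives time_remaining() from an absolute
# deadline: remaining time is the deadline minus "now", floored at zero. A
# tiny sketch of that arithmetic; Deadline is a hypothetical helper, whereas
# the real context consults its expiration manager under the operation lock.
import time


class Deadline(object):

    def __init__(self, timeout):
        self._deadline = time.time() + timeout

    def time_remaining(self):
        return max(0.0, self._deadline - time.time())


assert 0.0 <= Deadline(10.0).time_remaining() <= 10.0
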
diff --git a/src/python/grpcio/grpc/framework/core/_emission.py b/src/python/grpcio/grpc/framework/core/_emission.py
deleted file mode 100644
index 8ab59dc3e5..0000000000
--- a/src/python/grpcio/grpc/framework/core/_emission.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for handling emitted values."""
-
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.interfaces.base import base
-
-
-class EmissionManager(_interfaces.EmissionManager):
- """An EmissionManager implementation."""
-
- def __init__(
- self, lock, termination_manager, transmission_manager,
- expiration_manager):
- """Constructor.
-
- Args:
- lock: The operation-wide lock.
- termination_manager: The _interfaces.TerminationManager for the operation.
- transmission_manager: The _interfaces.TransmissionManager for the
- operation.
- expiration_manager: The _interfaces.ExpirationManager for the operation.
- """
- self._lock = lock
- self._termination_manager = termination_manager
- self._transmission_manager = transmission_manager
- self._expiration_manager = expiration_manager
- self._ingestion_manager = None
-
- self._initial_metadata_seen = False
- self._payload_seen = False
- self._completion_seen = False
-
- def set_ingestion_manager(self, ingestion_manager):
- """Sets the ingestion manager with which this manager will cooperate.
-
- Args:
- ingestion_manager: The _interfaces.IngestionManager for the operation.
- """
- self._ingestion_manager = ingestion_manager
-
- def advance(
- self, initial_metadata=None, payload=None, completion=None,
- allowance=None):
- initial_metadata_present = initial_metadata is not None
- payload_present = payload is not None
- completion_present = completion is not None
- allowance_present = allowance is not None
- with self._lock:
- if self._termination_manager.outcome is None:
- if (initial_metadata_present and (
- self._initial_metadata_seen or self._payload_seen or
- self._completion_seen) or
- payload_present and self._completion_seen or
- completion_present and self._completion_seen or
- allowance_present and allowance <= 0):
- outcome = _utilities.Outcome(
- base.Outcome.Kind.LOCAL_FAILURE, None, None)
- self._termination_manager.abort(outcome)
- self._transmission_manager.abort(outcome)
- self._expiration_manager.terminate()
- else:
- self._initial_metadata_seen |= initial_metadata_present
- self._payload_seen |= payload_present
- self._completion_seen |= completion_present
- if completion_present:
- self._termination_manager.emission_complete()
- self._ingestion_manager.local_emissions_done()
- self._transmission_manager.advance(
- initial_metadata, payload, completion, allowance)
- if allowance_present:
- self._ingestion_manager.add_local_allowance(allowance)
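
# A compact sketch of the ordering rules EmissionManager.advance enforces
# above: initial metadata may only come first, nothing may follow completion,
# and an allowance must be positive. emission_valid is a hypothetical helper
# that condenses the same checks into a single predicate.
def emission_valid(
        metadata_seen, payload_seen, completion_seen,
        has_metadata, has_payload, has_completion, allowance):
    if has_metadata and (metadata_seen or payload_seen or completion_seen):
        return False
    if (has_payload or has_completion) and completion_seen:
        return False
    if allowance is not None and allowance <= 0:
        return False
    return True


# A first payload plus metadata is fine; a payload after completion is not.
assert emission_valid(False, False, False, True, True, False, None)
assert not emission_valid(False, False, True, False, True, False, None)
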
diff --git a/src/python/grpcio/grpc/framework/core/_end.py b/src/python/grpcio/grpc/framework/core/_end.py
deleted file mode 100644
index 009d27c915..0000000000
--- a/src/python/grpcio/grpc/framework/core/_end.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Implementation of base.End."""
-
-import abc
-import threading
-import uuid
-
-import six
-
-from grpc.framework.core import _operation
-from grpc.framework.core import _utilities
-from grpc.framework.foundation import callable_util
-from grpc.framework.foundation import later
-from grpc.framework.foundation import logging_pool
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.links import links
-from grpc.framework.interfaces.links import utilities
-
-_IDLE_ACTION_EXCEPTION_LOG_MESSAGE = 'Exception calling idle action!'
-
-
-class End(six.with_metaclass(abc.ABCMeta, base.End, links.Link)):
- """A bridge between base.End and links.Link.
-
- Implementations of this interface translate arriving tickets into
- calls on application objects implementing base interfaces and
- translate calls from application objects implementing base interfaces
- into tickets sent to a joined link.
- """
-
-
-class _Cycle(object):
- """State for a single start-stop End lifecycle."""
-
- def __init__(self, pool):
- self.pool = pool
- self.grace = False
- self.futures = []
- self.operations = {}
- self.idle_actions = []
-
-
-def _abort(operations):
- for operation in operations:
- operation.abort(base.Outcome.Kind.LOCAL_SHUTDOWN)
-
-
-def _cancel_futures(futures):
- for future in futures:
- future.cancel()
-
-
-def _future_shutdown(lock, cycle, event):
- def in_future():
- with lock:
- _abort(cycle.operations.values())
- _cancel_futures(cycle.futures)
- return in_future
-
-
-class _End(End):
- """An End implementation."""
-
- def __init__(self, servicer_package):
- """Constructor.
-
- Args:
- servicer_package: A _ServicerPackage for servicing operations or None if
- this end will not be used to service operations.
- """
- self._lock = threading.Condition()
- self._servicer_package = servicer_package
-
- self._stats = {outcome_kind: 0 for outcome_kind in base.Outcome.Kind}
-
- self._mate = None
-
- self._cycle = None
-
- def _termination_action(self, operation_id):
- """Constructs the termination action for a single operation.
-
- Args:
- operation_id: The operation ID for the termination action.
-
- Returns:
- A callable that takes an operation outcome kind as its sole parameter and
- that should be used as the termination action for the operation
- associated with the given operation ID.
- """
- def termination_action(outcome_kind):
- with self._lock:
- self._stats[outcome_kind] += 1
- self._cycle.operations.pop(operation_id, None)
- if not self._cycle.operations:
- for action in self._cycle.idle_actions:
- self._cycle.pool.submit(action)
- self._cycle.idle_actions = []
- if self._cycle.grace:
- _cancel_futures(self._cycle.futures)
- self._cycle.pool.shutdown(wait=False)
- self._cycle = None
- return termination_action
-
- def start(self):
- """See base.End.start for specification."""
- with self._lock:
- if self._cycle is not None:
- raise ValueError('Tried to start a not-stopped End!')
- else:
- self._cycle = _Cycle(logging_pool.pool(1))
-
- def stop(self, grace):
- """See base.End.stop for specification."""
- with self._lock:
- if self._cycle is None:
- event = threading.Event()
- event.set()
- return event
- elif not self._cycle.operations:
- event = threading.Event()
- self._cycle.pool.submit(event.set)
- self._cycle.pool.shutdown(wait=False)
- self._cycle = None
- return event
- else:
- self._cycle.grace = True
- event = threading.Event()
- self._cycle.idle_actions.append(event.set)
- if 0 < grace:
- future = later.later(
- grace, _future_shutdown(self._lock, self._cycle, event))
- self._cycle.futures.append(future)
- else:
- _abort(self._cycle.operations.values())
- return event
-
- def operate(
- self, group, method, subscription, timeout, initial_metadata=None,
- payload=None, completion=None, protocol_options=None):
- """See base.End.operate for specification."""
- operation_id = uuid.uuid4()
- with self._lock:
- if self._cycle is None or self._cycle.grace:
- raise ValueError('Can\'t operate on stopped or stopping End!')
- termination_action = self._termination_action(operation_id)
- operation = _operation.invocation_operate(
- operation_id, group, method, subscription, timeout, protocol_options,
- initial_metadata, payload, completion, self._mate.accept_ticket,
- termination_action, self._cycle.pool)
- self._cycle.operations[operation_id] = operation
- return operation.context, operation.operator
-
- def operation_stats(self):
- """See base.End.operation_stats for specification."""
- with self._lock:
- return dict(self._stats)
-
- def add_idle_action(self, action):
- """See base.End.add_idle_action for specification."""
- with self._lock:
- if self._cycle is None:
- raise ValueError('Can\'t add idle action to stopped End!')
- action_with_exceptions_logged = callable_util.with_exceptions_logged(
- action, _IDLE_ACTION_EXCEPTION_LOG_MESSAGE)
- if self._cycle.operations:
- self._cycle.idle_actions.append(action_with_exceptions_logged)
- else:
- self._cycle.pool.submit(action_with_exceptions_logged)
-
- def accept_ticket(self, ticket):
- """See links.Link.accept_ticket for specification."""
- with self._lock:
- if self._cycle is not None:
- operation = self._cycle.operations.get(ticket.operation_id)
- if operation is not None:
- operation.handle_ticket(ticket)
- elif self._servicer_package is not None and not self._cycle.grace:
- termination_action = self._termination_action(ticket.operation_id)
- operation = _operation.service_operate(
- self._servicer_package, ticket, self._mate.accept_ticket,
- termination_action, self._cycle.pool)
- if operation is not None:
- self._cycle.operations[ticket.operation_id] = operation
-
- def join_link(self, link):
- """See links.Link.join_link for specification."""
- with self._lock:
- self._mate = utilities.NULL_LINK if link is None else link
-
-
-def serviceless_end_link():
- """Constructs an End usable only for invoking operations.
-
- Returns:
- An End usable for translating operations into ticket exchange.
- """
- return _End(None)
-
-
-def serviceful_end_link(servicer, default_timeout, maximum_timeout):
- """Constructs an End capable of servicing operations.
-
- Args:
- servicer: An interfaces.Servicer for servicing operations.
- default_timeout: A length of time in seconds to be used as the default
-      time allotted for a single operation.
- maximum_timeout: A length of time in seconds to be used as the maximum
-      time allotted for a single operation.
-
- Returns:
- An End capable of servicing the operations requested of it through ticket
- exchange.
- """
- return _End(
- _utilities.ServicerPackage(servicer, default_timeout, maximum_timeout))
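
Note on the API removed above: _end.py exposed two factories, serviceless_end_link() for invocation-only use and serviceful_end_link(servicer, default_timeout, maximum_timeout) for servicing, and the resulting End objects were started, stopped, and joined as links.Link instances. A minimal sketch of the invocation-only lifecycle, written against the module as it existed before this removal (illustrative only, not part of the diff):

from grpc.framework.core import _end

# Invocation-only End: the servicer package is None, so it can invoke
# operations but cannot service incoming ones.
end = _end.serviceless_end_link()
end.start()

# stop(grace) returns a threading.Event that is set once shutdown completes;
# with no live operations it is set as soon as the cycle's pool is shut down.
end.stop(0).wait()

print(end.operation_stats())  # Per-Outcome.Kind counters, all zero here.
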
diff --git a/src/python/grpcio/grpc/framework/core/_expiration.py b/src/python/grpcio/grpc/framework/core/_expiration.py
deleted file mode 100644
index ded0ab6bce..0000000000
--- a/src/python/grpcio/grpc/framework/core/_expiration.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for operation expiration."""
-
-import time
-
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.foundation import later
-from grpc.framework.interfaces.base import base
-
-
-class _ExpirationManager(_interfaces.ExpirationManager):
- """An implementation of _interfaces.ExpirationManager."""
-
- def __init__(
- self, commencement, timeout, maximum_timeout, lock, termination_manager,
- transmission_manager):
- """Constructor.
-
- Args:
- commencement: The time in seconds since the epoch at which the operation
- began.
- timeout: A length of time in seconds to allow for the operation to run.
- maximum_timeout: The maximum length of time in seconds to allow for the
- operation to run despite what is requested via this object's
-        change_timeout method.
- lock: The operation-wide lock.
- termination_manager: The _interfaces.TerminationManager for the operation.
- transmission_manager: The _interfaces.TransmissionManager for the
- operation.
- """
- self._lock = lock
- self._termination_manager = termination_manager
- self._transmission_manager = transmission_manager
- self._commencement = commencement
- self._maximum_timeout = maximum_timeout
-
- self._timeout = timeout
- self._deadline = commencement + timeout
- self._index = None
- self._future = None
-
- def _expire(self, index):
- def expire():
- with self._lock:
- if self._future is not None and index == self._index:
- self._future = None
- self._termination_manager.expire()
- self._transmission_manager.abort(
- _utilities.Outcome(base.Outcome.Kind.EXPIRED, None, None))
- return expire
-
- def start(self):
- self._index = 0
- self._future = later.later(self._timeout, self._expire(0))
-
- def change_timeout(self, timeout):
- if self._future is not None and timeout != self._timeout:
- self._future.cancel()
- new_timeout = min(timeout, self._maximum_timeout)
- new_index = self._index + 1
- self._timeout = new_timeout
- self._deadline = self._commencement + new_timeout
- self._index = new_index
- delay = self._deadline - time.time()
- self._future = later.later(delay, self._expire(new_index))
- if new_timeout != timeout:
- self._transmission_manager.timeout(new_timeout)
-
- def deadline(self):
- return self._deadline
-
- def terminate(self):
- if self._future:
- self._future.cancel()
- self._future = None
-    self._index = None
-
-
-def invocation_expiration_manager(
- timeout, lock, termination_manager, transmission_manager):
- """Creates an _interfaces.ExpirationManager appropriate for front-side use.
-
- Args:
- timeout: A length of time in seconds to allow for the operation to run.
- lock: The operation-wide lock.
- termination_manager: The _interfaces.TerminationManager for the operation.
- transmission_manager: The _interfaces.TransmissionManager for the
- operation.
-
- Returns:
- An _interfaces.ExpirationManager appropriate for invocation-side use.
- """
- expiration_manager = _ExpirationManager(
- time.time(), timeout, timeout, lock, termination_manager,
- transmission_manager)
- expiration_manager.start()
- return expiration_manager
-
-
-def service_expiration_manager(
- timeout, default_timeout, maximum_timeout, lock, termination_manager,
- transmission_manager):
- """Creates an _interfaces.ExpirationManager appropriate for back-side use.
-
- Args:
- timeout: A length of time in seconds to allow for the operation to run. May
- be None in which case default_timeout will be used.
- default_timeout: The default length of time in seconds to allow for the
- operation to run if the front-side customer has not specified such a value
- (or if the value they specified is not yet known).
- maximum_timeout: The maximum length of time in seconds to allow for the
- operation to run.
- lock: The operation-wide lock.
- termination_manager: The _interfaces.TerminationManager for the operation.
- transmission_manager: The _interfaces.TransmissionManager for the
- operation.
-
- Returns:
- An _interfaces.ExpirationManager appropriate for service-side use.
- """
- expiration_manager = _ExpirationManager(
- time.time(), default_timeout if timeout is None else timeout,
- maximum_timeout, lock, termination_manager, transmission_manager)
- expiration_manager.start()
- return expiration_manager
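
The manager above guards against stale timers by pairing each scheduled expiry with an index and honoring only a callback whose index still matches. A standalone, simplified analogue of that pattern, using threading.Timer in place of grpc.framework.foundation.later (the names below are illustrative, not the removed module's):

import threading
import time


class IndexGuardedTimeout(object):
  """Simplified analogue of _ExpirationManager's index-guarded expiry."""

  def __init__(self, timeout, on_expire):
    self._lock = threading.Lock()
    self._commencement = time.time()
    self._on_expire = on_expire
    self._index = 0
    self._timer = threading.Timer(timeout, self._expire(0))
    self._timer.start()

  def _expire(self, index):
    def expire():
      with self._lock:
        # Only the most recently scheduled timer may fire the expiry action.
        if self._timer is not None and index == self._index:
          self._timer = None
          self._on_expire()
    return expire

  def change_timeout(self, timeout):
    with self._lock:
      if self._timer is None:
        return
      self._timer.cancel()
      self._index += 1
      # The new deadline is measured from commencement, not from now.
      delay = max(0.0, self._commencement + timeout - time.time())
      self._timer = threading.Timer(delay, self._expire(self._index))
      self._timer.start()

  def terminate(self):
    with self._lock:
      if self._timer is not None:
        self._timer.cancel()
        self._timer = None

Constructed as IndexGuardedTimeout(5.0, some_callback), this behaves like the invocation-side manager above when the default and maximum timeouts are equal.
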
diff --git a/src/python/grpcio/grpc/framework/core/_ingestion.py b/src/python/grpcio/grpc/framework/core/_ingestion.py
deleted file mode 100644
index f2767c981b..0000000000
--- a/src/python/grpcio/grpc/framework/core/_ingestion.py
+++ /dev/null
@@ -1,439 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for ingestion during an operation."""
-
-import abc
-import collections
-import enum
-
-import six
-
-from grpc.framework.core import _constants
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.foundation import abandonment
-from grpc.framework.foundation import callable_util
-from grpc.framework.interfaces.base import base
-
-_CREATE_SUBSCRIPTION_EXCEPTION_LOG_MESSAGE = 'Exception initializing ingestion!'
-_INGESTION_EXCEPTION_LOG_MESSAGE = 'Exception during ingestion!'
-
-
-class _SubscriptionCreation(
- collections.namedtuple(
- '_SubscriptionCreation',
- ('kind', 'subscription', 'code', 'details',))):
- """A sum type for the outcome of ingestion initialization.
-
- Attributes:
- kind: A Kind value coarsely indicating how subscription creation completed.
- subscription: The created subscription. Only present if kind is
- Kind.SUBSCRIPTION.
- code: A code value to be sent to the other side of the operation along with
- an indication that the operation is being aborted due to an error on the
- remote side of the operation. Only present if kind is Kind.REMOTE_ERROR.
- details: A details value to be sent to the other side of the operation
- along with an indication that the operation is being aborted due to an
- error on the remote side of the operation. Only present if kind is
- Kind.REMOTE_ERROR.
- """
-
- @enum.unique
- class Kind(enum.Enum):
- SUBSCRIPTION = 'subscription'
- REMOTE_ERROR = 'remote error'
- ABANDONED = 'abandoned'
-
-
-class _SubscriptionCreator(six.with_metaclass(abc.ABCMeta)):
- """Common specification of subscription-creating behavior."""
-
- @abc.abstractmethod
- def create(self, group, method):
- """Creates the base.Subscription of the local customer.
-
- Any exceptions raised by this method should be attributed to and treated as
- defects in the customer code called by this method.
-
- Args:
- group: The group identifier of the operation.
- method: The method identifier of the operation.
-
- Returns:
- A _SubscriptionCreation describing the result of subscription creation.
- """
- raise NotImplementedError()
-
-
-class _ServiceSubscriptionCreator(_SubscriptionCreator):
- """A _SubscriptionCreator appropriate for service-side use."""
-
- def __init__(self, servicer, operation_context, output_operator):
- """Constructor.
-
- Args:
- servicer: The base.Servicer that will service the operation.
- operation_context: A base.OperationContext for the operation to be passed
- to the customer.
- output_operator: A base.Operator for the operation to be passed to the
- customer and to be called by the customer to accept operation data
- emitted by the customer.
- """
- self._servicer = servicer
- self._operation_context = operation_context
- self._output_operator = output_operator
-
- def create(self, group, method):
- try:
- subscription = self._servicer.service(
- group, method, self._operation_context, self._output_operator)
- except base.NoSuchMethodError as e:
- return _SubscriptionCreation(
- _SubscriptionCreation.Kind.REMOTE_ERROR, None, e.code, e.details)
- except abandonment.Abandoned:
- return _SubscriptionCreation(
- _SubscriptionCreation.Kind.ABANDONED, None, None, None)
- else:
- return _SubscriptionCreation(
- _SubscriptionCreation.Kind.SUBSCRIPTION, subscription, None, None)
-
-
-def _wrap(behavior):
- def wrapped(*args, **kwargs):
- try:
- behavior(*args, **kwargs)
- except abandonment.Abandoned:
- return False
- else:
- return True
- return wrapped
-
-
-class _IngestionManager(_interfaces.IngestionManager):
- """An implementation of _interfaces.IngestionManager."""
-
- def __init__(
- self, lock, pool, subscription, subscription_creator, termination_manager,
- transmission_manager, expiration_manager, protocol_manager):
- """Constructor.
-
- Args:
- lock: The operation-wide lock.
- pool: A thread pool in which to execute customer code.
- subscription: A base.Subscription describing the customer's interest in
- operation values from the other side. May be None if
- subscription_creator is not None.
- subscription_creator: A _SubscriptionCreator wrapping the portion of
- customer code that when called returns the base.Subscription describing
- the customer's interest in operation values from the other side. May be
- None if subscription is not None.
- termination_manager: The _interfaces.TerminationManager for the operation.
- transmission_manager: The _interfaces.TransmissionManager for the
- operation.
- expiration_manager: The _interfaces.ExpirationManager for the operation.
- protocol_manager: The _interfaces.ProtocolManager for the operation.
- """
- self._lock = lock
- self._pool = pool
- self._termination_manager = termination_manager
- self._transmission_manager = transmission_manager
- self._expiration_manager = expiration_manager
- self._protocol_manager = protocol_manager
-
- if subscription is None:
- self._subscription_creator = subscription_creator
- self._wrapped_operator = None
- elif subscription.kind is base.Subscription.Kind.FULL:
- self._subscription_creator = None
- self._wrapped_operator = _wrap(subscription.operator.advance)
- else:
- # TODO(nathaniel): Support other subscriptions.
- raise ValueError('Unsupported subscription "%s"!' % subscription.kind)
- self._pending_initial_metadata = None
- self._pending_payloads = []
- self._pending_completion = None
- self._local_allowance = 1
-    # A nonnegative integer or None; None indicates that the local customer is
-    # done emitting, so there is no need to inform it that the remote customer
-    # has granted it further permission to emit.
- self._remote_allowance = 0
- self._processing = False
-
- def _abort_internal_only(self):
- self._subscription_creator = None
- self._wrapped_operator = None
- self._pending_initial_metadata = None
- self._pending_payloads = None
- self._pending_completion = None
-
- def _abort_and_notify(self, outcome_kind, code, details):
- self._abort_internal_only()
- if self._termination_manager.outcome is None:
- outcome = _utilities.Outcome(outcome_kind, code, details)
- self._termination_manager.abort(outcome)
- self._transmission_manager.abort(outcome)
- self._expiration_manager.terminate()
-
- def _operator_next(self):
- """Computes the next step for full-subscription ingestion.
-
- Returns:
- An initial_metadata, payload, completion, allowance, continue quintet
- indicating what operation values (if any) are available to pass into
- customer code and whether or not there is anything immediately
- actionable to call customer code to do.
- """
- if self._wrapped_operator is None:
- return None, None, None, None, False
- else:
- initial_metadata, payload, completion, allowance, action = [None] * 5
- if self._pending_initial_metadata is not None:
- initial_metadata = self._pending_initial_metadata
- self._pending_initial_metadata = None
- action = True
- if self._pending_payloads and 0 < self._local_allowance:
- payload = self._pending_payloads.pop(0)
- self._local_allowance -= 1
- action = True
- if not self._pending_payloads and self._pending_completion is not None:
- completion = self._pending_completion
- self._pending_completion = None
- action = True
- if self._remote_allowance is not None and 0 < self._remote_allowance:
- allowance = self._remote_allowance
- self._remote_allowance = 0
- action = True
- return initial_metadata, payload, completion, allowance, bool(action)
-
- def _operator_process(
- self, wrapped_operator, initial_metadata, payload,
- completion, allowance):
- while True:
- advance_outcome = callable_util.call_logging_exceptions(
- wrapped_operator, _INGESTION_EXCEPTION_LOG_MESSAGE,
- initial_metadata=initial_metadata, payload=payload,
- completion=completion, allowance=allowance)
- if advance_outcome.exception is None:
- if advance_outcome.return_value:
- with self._lock:
- if self._termination_manager.outcome is not None:
- return
- if completion is not None:
- self._termination_manager.ingestion_complete()
- initial_metadata, payload, completion, allowance, moar = (
- self._operator_next())
- if not moar:
- self._processing = False
- return
- else:
- with self._lock:
- if self._termination_manager.outcome is None:
- self._abort_and_notify(
- base.Outcome.Kind.LOCAL_FAILURE, None, None)
- return
- else:
- with self._lock:
- if self._termination_manager.outcome is None:
- self._abort_and_notify(base.Outcome.Kind.LOCAL_FAILURE, None, None)
- return
-
- def _operator_post_create(self, subscription):
- wrapped_operator = _wrap(subscription.operator.advance)
- with self._lock:
- if self._termination_manager.outcome is not None:
- return
- self._wrapped_operator = wrapped_operator
- self._subscription_creator = None
- metadata, payload, completion, allowance, moar = self._operator_next()
- if not moar:
- self._processing = False
- return
- self._operator_process(
- wrapped_operator, metadata, payload, completion, allowance)
-
- def _create(self, subscription_creator, group, name):
- outcome = callable_util.call_logging_exceptions(
- subscription_creator.create,
- _CREATE_SUBSCRIPTION_EXCEPTION_LOG_MESSAGE, group, name)
- if outcome.return_value is None:
- with self._lock:
- if self._termination_manager.outcome is None:
- self._abort_and_notify(base.Outcome.Kind.LOCAL_FAILURE, None, None)
- elif outcome.return_value.kind is _SubscriptionCreation.Kind.ABANDONED:
- with self._lock:
- if self._termination_manager.outcome is None:
- self._abort_and_notify(base.Outcome.Kind.LOCAL_FAILURE, None, None)
- elif outcome.return_value.kind is _SubscriptionCreation.Kind.REMOTE_ERROR:
- code = outcome.return_value.code
- details = outcome.return_value.details
- with self._lock:
- if self._termination_manager.outcome is None:
- self._abort_and_notify(
- base.Outcome.Kind.REMOTE_FAILURE, code, details)
- elif outcome.return_value.subscription.kind is base.Subscription.Kind.FULL:
- self._protocol_manager.set_protocol_receiver(
- outcome.return_value.subscription.protocol_receiver)
- self._operator_post_create(outcome.return_value.subscription)
- else:
- # TODO(nathaniel): Support other subscriptions.
- raise ValueError(
- 'Unsupported "%s"!' % outcome.return_value.subscription.kind)
-
- def _store_advance(self, initial_metadata, payload, completion, allowance):
- if initial_metadata is not None:
- self._pending_initial_metadata = initial_metadata
- if payload is not None:
- self._pending_payloads.append(payload)
- if completion is not None:
- self._pending_completion = completion
- if allowance is not None and self._remote_allowance is not None:
- self._remote_allowance += allowance
-
- def _operator_advance(self, initial_metadata, payload, completion, allowance):
- if self._processing:
- self._store_advance(initial_metadata, payload, completion, allowance)
- else:
- action = False
- if initial_metadata is not None:
- action = True
- if payload is not None:
- if 0 < self._local_allowance:
- self._local_allowance -= 1
- action = True
- else:
- self._pending_payloads.append(payload)
-        payload = None
- if completion is not None:
- if self._pending_payloads:
- self._pending_completion = completion
- else:
- action = True
- if allowance is not None and self._remote_allowance is not None:
- allowance += self._remote_allowance
- self._remote_allowance = 0
- action = True
- if action:
- self._pool.submit(
- callable_util.with_exceptions_logged(
- self._operator_process, _constants.INTERNAL_ERROR_LOG_MESSAGE),
- self._wrapped_operator, initial_metadata, payload, completion,
- allowance)
-
- def set_group_and_method(self, group, method):
- """See _interfaces.IngestionManager.set_group_and_method for spec."""
- if self._subscription_creator is not None and not self._processing:
- self._pool.submit(
- callable_util.with_exceptions_logged(
- self._create, _constants.INTERNAL_ERROR_LOG_MESSAGE),
- self._subscription_creator, group, method)
- self._processing = True
-
- def add_local_allowance(self, allowance):
- """See _interfaces.IngestionManager.add_local_allowance for spec."""
- if any((self._subscription_creator, self._wrapped_operator,)):
- self._local_allowance += allowance
- if not self._processing:
- initial_metadata, payload, completion, allowance, moar = (
- self._operator_next())
- if moar:
- self._pool.submit(
- callable_util.with_exceptions_logged(
- self._operator_process,
- _constants.INTERNAL_ERROR_LOG_MESSAGE),
-              self._wrapped_operator, initial_metadata, payload, completion,
-              allowance)
-
- def local_emissions_done(self):
- self._remote_allowance = None
-
- def advance(self, initial_metadata, payload, completion, allowance):
- """See _interfaces.IngestionManager.advance for specification."""
- if self._subscription_creator is not None:
- self._store_advance(initial_metadata, payload, completion, allowance)
- elif self._wrapped_operator is not None:
- self._operator_advance(initial_metadata, payload, completion, allowance)
-
-
-def invocation_ingestion_manager(
- subscription, lock, pool, termination_manager, transmission_manager,
- expiration_manager, protocol_manager):
- """Creates an IngestionManager appropriate for invocation-side use.
-
- Args:
- subscription: A base.Subscription indicating the customer's interest in the
- data and results from the service-side of the operation.
- lock: The operation-wide lock.
- pool: A thread pool in which to execute customer code.
- termination_manager: The _interfaces.TerminationManager for the operation.
- transmission_manager: The _interfaces.TransmissionManager for the
- operation.
- expiration_manager: The _interfaces.ExpirationManager for the operation.
- protocol_manager: The _interfaces.ProtocolManager for the operation.
-
- Returns:
- An IngestionManager appropriate for invocation-side use.
- """
- return _IngestionManager(
- lock, pool, subscription, None, termination_manager, transmission_manager,
- expiration_manager, protocol_manager)
-
-
-def service_ingestion_manager(
- servicer, operation_context, output_operator, lock, pool,
- termination_manager, transmission_manager, expiration_manager,
- protocol_manager):
- """Creates an IngestionManager appropriate for service-side use.
-
-  The returned IngestionManager will require its set_group_and_method method
-  to be called before its advance method may be called.
-
- Args:
- servicer: A base.Servicer for servicing the operation.
- operation_context: A base.OperationContext for the operation to be passed to
- the customer.
- output_operator: A base.Operator for the operation to be passed to the
- customer and to be called by the customer to accept operation data output
- by the customer.
- lock: The operation-wide lock.
- pool: A thread pool in which to execute customer code.
- termination_manager: The _interfaces.TerminationManager for the operation.
- transmission_manager: The _interfaces.TransmissionManager for the
- operation.
- expiration_manager: The _interfaces.ExpirationManager for the operation.
- protocol_manager: The _interfaces.ProtocolManager for the operation.
-
- Returns:
- An IngestionManager appropriate for service-side use.
- """
- subscription_creator = _ServiceSubscriptionCreator(
- servicer, operation_context, output_operator)
- return _IngestionManager(
- lock, pool, None, subscription_creator, termination_manager,
- transmission_manager, expiration_manager, protocol_manager)
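
The _wrap helper above reports abandonment.Abandoned raised by customer code as a False return value, which _operator_process then treats as a local failure. A self-contained sketch of that convention (the Abandoned class below is a stand-in, not the real grpc.framework.foundation.abandonment.Abandoned):

class Abandoned(Exception):
  """Stand-in for grpc.framework.foundation.abandonment.Abandoned."""


def wrap(behavior):
  """Returns a callable reporting abandonment as False and success as True."""
  def wrapped(*args, **kwargs):
    try:
      behavior(*args, **kwargs)
    except Abandoned:
      return False
    else:
      return True
  return wrapped


def flaky_advance(initial_metadata=None, payload=None, completion=None,
                  allowance=None):
  if payload == 'poison':
    raise Abandoned()


advance = wrap(flaky_advance)
assert advance(payload='ok') is True       # Customer code ran to completion.
assert advance(payload='poison') is False  # Customer code abandoned the operation.
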
diff --git a/src/python/grpcio/grpc/framework/core/_interfaces.py b/src/python/grpcio/grpc/framework/core/_interfaces.py
deleted file mode 100644
index 63ac82f80e..0000000000
--- a/src/python/grpcio/grpc/framework/core/_interfaces.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Package-internal interfaces."""
-
-import abc
-
-import six
-
-from grpc.framework.interfaces.base import base
-
-
-class TerminationManager(six.with_metaclass(abc.ABCMeta)):
- """An object responsible for handling the termination of an operation.
-
- Attributes:
- outcome: None if the operation is active or a base.Outcome value if it has
- terminated.
- """
-
- @abc.abstractmethod
- def add_callback(self, callback):
- """Registers a callback to be called on operation termination.
-
-    If the operation has already terminated, the callback will not be called.
-
- Args:
- callback: A callable that will be passed a base.Outcome value.
-
- Returns:
- None if the operation has not yet terminated and the passed callback will
- be called when it does, or a base.Outcome value describing the
- operation termination if the operation has terminated and the callback
- will not be called as a result of this method call.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def emission_complete(self):
- """Indicates that emissions from customer code have completed."""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def transmission_complete(self):
- """Indicates that transmissions to the remote end are complete.
-
- Returns:
- True if the operation has terminated or False if the operation remains
- ongoing.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def reception_complete(self, code, details):
- """Indicates that reception from the other side is complete.
-
- Args:
- code: An application-specific code value.
- details: An application-specific details value.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def ingestion_complete(self):
- """Indicates that customer code ingestion of received values is complete."""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def expire(self):
- """Indicates that the operation must abort because it has taken too long."""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def abort(self, outcome):
- """Indicates that the operation must abort for the indicated reason.
-
- Args:
- outcome: A base.Outcome indicating operation abortion.
- """
- raise NotImplementedError()
-
-
-class TransmissionManager(six.with_metaclass(abc.ABCMeta)):
- """A manager responsible for transmitting to the other end of an operation."""
-
- @abc.abstractmethod
- def kick_off(
- self, group, method, timeout, protocol_options, initial_metadata,
- payload, completion, allowance):
- """Transmits the values associated with operation invocation."""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def advance(self, initial_metadata, payload, completion, allowance):
- """Accepts values for transmission to the other end of the operation.
-
- Args:
- initial_metadata: An initial metadata value to be transmitted to the other
- side of the operation. May only ever be non-None once.
- payload: A payload value.
- completion: A base.Completion value. May only ever be non-None in the last
- transmission to be made to the other side.
- allowance: A positive integer communicating the number of additional
- payloads allowed to be transmitted from the other side to this side of
- the operation, or None if no additional allowance is being granted in
- this call.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def timeout(self, timeout):
-    """Accepts a new timeout value for transmission to the other side.
-
- Args:
- timeout: A positive float used as the new timeout value for the operation
- to be transmitted to the other side.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def allowance(self, allowance):
- """Indicates to this manager that the remote customer is allowing payloads.
-
- Args:
- allowance: A positive integer indicating the number of additional payloads
- the remote customer is allowing to be transmitted from this side of the
- operation.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def remote_complete(self):
- """Indicates to this manager that data from the remote side is complete."""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def abort(self, outcome):
- """Indicates that the operation has aborted.
-
- Args:
- outcome: A base.Outcome for the operation. If None, indicates that the
- operation abortion should not be communicated to the other side of the
- operation.
- """
- raise NotImplementedError()
-
-
-class ExpirationManager(six.with_metaclass(abc.ABCMeta)):
- """A manager responsible for aborting the operation if it runs out of time."""
-
- @abc.abstractmethod
- def change_timeout(self, timeout):
- """Changes the timeout allotted for the operation.
-
-    Operation duration is always measured from the beginning of the operation;
- calling this method changes the operation's allotted time to timeout total
- seconds, not timeout seconds from the time of this method call.
-
- Args:
- timeout: A length of time in seconds to allow for the operation.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def deadline(self):
- """Returns the time until which the operation is allowed to run.
-
- Returns:
- The time (seconds since the epoch) at which the operation will expire.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def terminate(self):
- """Indicates to this manager that the operation has terminated."""
- raise NotImplementedError()
-
-
-class ProtocolManager(six.with_metaclass(abc.ABCMeta)):
- """A manager of protocol-specific values passing through an operation."""
-
- @abc.abstractmethod
- def set_protocol_receiver(self, protocol_receiver):
- """Registers the customer object that will receive protocol objects.
-
- Args:
- protocol_receiver: A base.ProtocolReceiver to which protocol objects for
- the operation should be passed.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def accept_protocol_context(self, protocol_context):
- """Accepts the protocol context object for the operation.
-
- Args:
- protocol_context: An object designated for use as the protocol context
- of the operation, with further semantics implementation-determined.
- """
- raise NotImplementedError()
-
-
-class EmissionManager(six.with_metaclass(abc.ABCMeta, base.Operator)):
- """A manager of values emitted by customer code."""
-
- @abc.abstractmethod
- def advance(
- self, initial_metadata=None, payload=None, completion=None,
- allowance=None):
- """Accepts a value emitted by customer code.
-
- This method should only be called by customer code.
-
- Args:
- initial_metadata: An initial metadata value emitted by the local customer
- to be sent to the other side of the operation.
- payload: A payload value emitted by the local customer to be sent to the
- other side of the operation.
- completion: A Completion value emitted by the local customer to be sent to
- the other side of the operation.
- allowance: A positive integer indicating an additional number of payloads
- that the local customer is willing to accept from the other side of the
- operation.
- """
- raise NotImplementedError()
-
-
-class IngestionManager(six.with_metaclass(abc.ABCMeta)):
- """A manager responsible for executing customer code.
-
-  The name of this manager comes from its responsibility to pass successive
- values from the other side of the operation into the code of the local
- customer.
- """
-
- @abc.abstractmethod
- def set_group_and_method(self, group, method):
- """Communicates to this IngestionManager the operation group and method.
-
- Args:
- group: The group identifier of the operation.
- method: The method identifier of the operation.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def add_local_allowance(self, allowance):
- """Communicates to this IngestionManager that more payloads may be ingested.
-
- Args:
- allowance: A positive integer indicating an additional number of payloads
- that the local customer is willing to ingest.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def local_emissions_done(self):
- """Indicates to this manager that local emissions are done."""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def advance(self, initial_metadata, payload, completion, allowance):
- """Advances the operation by passing values to the local customer."""
- raise NotImplementedError()
-
-
-class ReceptionManager(six.with_metaclass(abc.ABCMeta)):
- """A manager responsible for receiving tickets from the other end."""
-
- @abc.abstractmethod
- def receive_ticket(self, ticket):
- """Handle a ticket from the other side of the operation.
-
- Args:
- ticket: A links.Ticket for the operation.
- """
- raise NotImplementedError()
-
-
-class Operation(six.with_metaclass(abc.ABCMeta)):
- """An ongoing operation.
-
- Attributes:
- context: A base.OperationContext object for the operation.
- operator: A base.Operator object for the operation for use by the customer
- of the operation.
- """
-
- @abc.abstractmethod
- def handle_ticket(self, ticket):
- """Handle a ticket from the other side of the operation.
-
- Args:
- ticket: A links.Ticket from the other side of the operation.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def abort(self, outcome_kind):
- """Aborts the operation.
-
- Args:
- outcome_kind: A base.Outcome.Kind value indicating operation abortion.
- """
- raise NotImplementedError()
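
Each interface above is an abstract base class built with six.with_metaclass(abc.ABCMeta, ...), so a concrete manager satisfies it by subclassing and overriding every abstract method. A minimal, hypothetical subclass of ReceptionManager that merely buffers tickets, written against the module as it existed before this removal:

from grpc.framework.core import _interfaces


class BufferingReceptionManager(_interfaces.ReceptionManager):
  """Hypothetical ReceptionManager that only records received tickets."""

  def __init__(self):
    self._tickets = []

  def receive_ticket(self, ticket):
    """See _interfaces.ReceptionManager.receive_ticket for specification."""
    self._tickets.append(ticket)


# Instantiable because no abstract methods remain unimplemented.
manager = BufferingReceptionManager()
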
diff --git a/src/python/grpcio/grpc/framework/core/_operation.py b/src/python/grpcio/grpc/framework/core/_operation.py
deleted file mode 100644
index 020c0c9ed9..0000000000
--- a/src/python/grpcio/grpc/framework/core/_operation.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Implementation of operations."""
-
-import threading
-
-from grpc.framework.core import _context
-from grpc.framework.core import _emission
-from grpc.framework.core import _expiration
-from grpc.framework.core import _ingestion
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _protocol
-from grpc.framework.core import _reception
-from grpc.framework.core import _termination
-from grpc.framework.core import _transmission
-from grpc.framework.core import _utilities
-
-
-class _EasyOperation(_interfaces.Operation):
- """A trivial implementation of interfaces.Operation."""
-
- def __init__(
- self, lock, termination_manager, transmission_manager, expiration_manager,
- context, operator, reception_manager):
- """Constructor.
-
- Args:
- lock: The operation-wide lock.
- termination_manager: The _interfaces.TerminationManager for the operation.
- transmission_manager: The _interfaces.TransmissionManager for the
- operation.
- expiration_manager: The _interfaces.ExpirationManager for the operation.
- context: A base.OperationContext for use by the customer during the
- operation.
- operator: A base.Operator for use by the customer during the operation.
- reception_manager: The _interfaces.ReceptionManager for the operation.
- """
- self._lock = lock
- self._termination_manager = termination_manager
- self._transmission_manager = transmission_manager
- self._expiration_manager = expiration_manager
- self._reception_manager = reception_manager
-
- self.context = context
- self.operator = operator
-
- def handle_ticket(self, ticket):
- with self._lock:
- self._reception_manager.receive_ticket(ticket)
-
- def abort(self, outcome_kind):
- with self._lock:
- if self._termination_manager.outcome is None:
- outcome = _utilities.Outcome(outcome_kind, None, None)
- self._termination_manager.abort(outcome)
- self._transmission_manager.abort(outcome)
- self._expiration_manager.terminate()
-
-
-def invocation_operate(
- operation_id, group, method, subscription, timeout, protocol_options,
- initial_metadata, payload, completion, ticket_sink, termination_action,
- pool):
- """Constructs objects necessary for front-side operation management.
-
- Args:
- operation_id: An object identifying the operation.
- group: The group identifier of the operation.
- method: The method identifier of the operation.
- subscription: A base.Subscription describing the customer's interest in the
- results of the operation.
- timeout: A length of time in seconds to allow for the operation.
- protocol_options: A transport-specific, application-specific, and/or
- protocol-specific value relating to the invocation. May be None.
- initial_metadata: An initial metadata value to be sent to the other side of
- the operation. May be None if the initial metadata will be passed later or
- if there will be no initial metadata passed at all.
- payload: The first payload value to be transmitted to the other side. May be
- None if there is no such value or if the customer chose not to pass it at
- operation invocation.
- completion: A base.Completion value indicating the end of values passed to
- the other side of the operation.
- ticket_sink: A callable that accepts links.Tickets and delivers them to the
- other side of the operation.
- termination_action: A callable that accepts the outcome of the operation as
- a base.Outcome value to be called on operation completion.
- pool: A thread pool with which to do the work of the operation.
-
- Returns:
- An _interfaces.Operation for the operation.
- """
- lock = threading.Lock()
- with lock:
- termination_manager = _termination.invocation_termination_manager(
- termination_action, pool)
- transmission_manager = _transmission.TransmissionManager(
- operation_id, ticket_sink, lock, pool, termination_manager)
- expiration_manager = _expiration.invocation_expiration_manager(
- timeout, lock, termination_manager, transmission_manager)
- protocol_manager = _protocol.invocation_protocol_manager(
- subscription, lock, pool, termination_manager, transmission_manager,
- expiration_manager)
- operation_context = _context.OperationContext(
- lock, termination_manager, transmission_manager, expiration_manager)
- emission_manager = _emission.EmissionManager(
- lock, termination_manager, transmission_manager, expiration_manager)
- ingestion_manager = _ingestion.invocation_ingestion_manager(
- subscription, lock, pool, termination_manager, transmission_manager,
- expiration_manager, protocol_manager)
- reception_manager = _reception.ReceptionManager(
- termination_manager, transmission_manager, expiration_manager,
- protocol_manager, ingestion_manager)
-
- termination_manager.set_expiration_manager(expiration_manager)
- transmission_manager.set_expiration_manager(expiration_manager)
- emission_manager.set_ingestion_manager(ingestion_manager)
-
- transmission_manager.kick_off(
- group, method, timeout, protocol_options, initial_metadata, payload,
- completion, None)
-
- return _EasyOperation(
- lock, termination_manager, transmission_manager, expiration_manager,
- operation_context, emission_manager, reception_manager)
-
-
-def service_operate(
- servicer_package, ticket, ticket_sink, termination_action, pool):
- """Constructs an Operation for service of an operation.
-
- Args:
- servicer_package: A _utilities.ServicerPackage to be used servicing the
- operation.
- ticket: The first links.Ticket received for the operation.
- ticket_sink: A callable that accepts links.Tickets and delivers them to the
- other side of the operation.
- termination_action: A callable that accepts the outcome of the operation as
- a base.Outcome value to be called on operation completion.
- pool: A thread pool with which to do the work of the operation.
-
- Returns:
- An _interfaces.Operation for the operation.
- """
- lock = threading.Lock()
- with lock:
- termination_manager = _termination.service_termination_manager(
- termination_action, pool)
- transmission_manager = _transmission.TransmissionManager(
- ticket.operation_id, ticket_sink, lock, pool, termination_manager)
- expiration_manager = _expiration.service_expiration_manager(
- ticket.timeout, servicer_package.default_timeout,
- servicer_package.maximum_timeout, lock, termination_manager,
- transmission_manager)
- protocol_manager = _protocol.service_protocol_manager(
- lock, pool, termination_manager, transmission_manager,
- expiration_manager)
- operation_context = _context.OperationContext(
- lock, termination_manager, transmission_manager, expiration_manager)
- emission_manager = _emission.EmissionManager(
- lock, termination_manager, transmission_manager, expiration_manager)
- ingestion_manager = _ingestion.service_ingestion_manager(
- servicer_package.servicer, operation_context, emission_manager, lock,
- pool, termination_manager, transmission_manager, expiration_manager,
- protocol_manager)
- reception_manager = _reception.ReceptionManager(
- termination_manager, transmission_manager, expiration_manager,
- protocol_manager, ingestion_manager)
-
- termination_manager.set_expiration_manager(expiration_manager)
- transmission_manager.set_expiration_manager(expiration_manager)
- emission_manager.set_ingestion_manager(ingestion_manager)
-
- reception_manager.receive_ticket(ticket)
-
- return _EasyOperation(
- lock, termination_manager, transmission_manager, expiration_manager,
- operation_context, emission_manager, reception_manager)
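
Both factory functions above expect two externally supplied callables whose shapes are described only in the docstrings: a ticket_sink that accepts links.Tickets bound for the other side, and a termination_action invoked when the operation ends. A hedged sketch of what a caller might pass, with logging_pool providing the thread pool as in _end.py (the bodies below are illustrative placeholders, not the library's own):

from grpc.framework.foundation import logging_pool


def ticket_sink(ticket):
  # A real link would deliver this links.Ticket to the other side of the
  # operation; this placeholder only logs it.
  print('outgoing ticket:', ticket)


def termination_action(outcome):
  # Called when the operation terminates, with a value describing how it
  # ended (see the docstrings above for the exact contract).
  print('operation finished:', outcome)


pool = logging_pool.pool(8)  # Thread pool with which to do the operation's work.
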
diff --git a/src/python/grpcio/grpc/framework/core/_protocol.py b/src/python/grpcio/grpc/framework/core/_protocol.py
deleted file mode 100644
index 3177b5e302..0000000000
--- a/src/python/grpcio/grpc/framework/core/_protocol.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for passing protocol objects in an operation."""
-
-import collections
-import enum
-
-from grpc.framework.core import _constants
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.foundation import callable_util
-from grpc.framework.interfaces.base import base
-
-_EXCEPTION_LOG_MESSAGE = 'Exception delivering protocol object!'
-
-_LOCAL_FAILURE_OUTCOME = _utilities.Outcome(
- base.Outcome.Kind.LOCAL_FAILURE, None, None)
-
-
-class _Awaited(
- collections.namedtuple('_Awaited', ('kind', 'value',))):
-
- @enum.unique
- class Kind(enum.Enum):
- NOT_YET_ARRIVED = 'not yet arrived'
- ARRIVED = 'arrived'
-
-_NOT_YET_ARRIVED = _Awaited(_Awaited.Kind.NOT_YET_ARRIVED, None)
-_ARRIVED_AND_NONE = _Awaited(_Awaited.Kind.ARRIVED, None)
-
-
-class _Transitory(
- collections.namedtuple('_Transitory', ('kind', 'value',))):
-
- @enum.unique
- class Kind(enum.Enum):
- NOT_YET_SEEN = 'not yet seen'
- PRESENT = 'present'
- GONE = 'gone'
-
-_NOT_YET_SEEN = _Transitory(_Transitory.Kind.NOT_YET_SEEN, None)
-_GONE = _Transitory(_Transitory.Kind.GONE, None)
-
-
-class _ProtocolManager(_interfaces.ProtocolManager):
-  """An implementation of _interfaces.ProtocolManager."""
-
- def __init__(
- self, protocol_receiver, lock, pool, termination_manager,
- transmission_manager, expiration_manager):
- """Constructor.
-
- Args:
- protocol_receiver: An _Awaited wrapping of the base.ProtocolReceiver to
- which protocol objects should be passed during the operation. May be
- of kind _Awaited.Kind.NOT_YET_ARRIVED if the customer's subscription is
- not yet known and may be of kind _Awaited.Kind.ARRIVED but with a value
- of None if the customer's subscription did not include a
- ProtocolReceiver.
- lock: The operation-wide lock.
- pool: A thread pool.
- termination_manager: The _interfaces.TerminationManager for the operation.
- transmission_manager: The _interfaces.TransmissionManager for the
- operation.
- expiration_manager: The _interfaces.ExpirationManager for the operation.
- """
- self._lock = lock
- self._pool = pool
- self._termination_manager = termination_manager
- self._transmission_manager = transmission_manager
- self._expiration_manager = expiration_manager
-
- self._protocol_receiver = protocol_receiver
- self._context = _NOT_YET_SEEN
-
- def _abort_and_notify(self, outcome):
- if self._termination_manager.outcome is None:
- self._termination_manager.abort(outcome)
- self._transmission_manager.abort(outcome)
- self._expiration_manager.terminate()
-
- def _deliver(self, behavior, value):
- def deliver():
- delivery_outcome = callable_util.call_logging_exceptions(
- behavior, _EXCEPTION_LOG_MESSAGE, value)
- if delivery_outcome.kind is callable_util.Outcome.Kind.RAISED:
- with self._lock:
- self._abort_and_notify(_LOCAL_FAILURE_OUTCOME)
- self._pool.submit(
- callable_util.with_exceptions_logged(
- deliver, _constants.INTERNAL_ERROR_LOG_MESSAGE))
-
- def set_protocol_receiver(self, protocol_receiver):
- """See _interfaces.ProtocolManager.set_protocol_receiver for spec."""
- self._protocol_receiver = _Awaited(_Awaited.Kind.ARRIVED, protocol_receiver)
- if (self._context.kind is _Transitory.Kind.PRESENT and
- protocol_receiver is not None):
- self._deliver(protocol_receiver.context, self._context.value)
- self._context = _GONE
-
- def accept_protocol_context(self, protocol_context):
- """See _interfaces.ProtocolManager.accept_protocol_context for spec."""
- if self._protocol_receiver.kind is _Awaited.Kind.ARRIVED:
- if self._protocol_receiver.value is not None:
- self._deliver(self._protocol_receiver.value.context, protocol_context)
- self._context = _GONE
- else:
- self._context = _Transitory(_Transitory.Kind.PRESENT, protocol_context)
-
-
-def invocation_protocol_manager(
- subscription, lock, pool, termination_manager, transmission_manager,
- expiration_manager):
- """Creates an _interfaces.ProtocolManager for invocation-side use.
-
- Args:
- subscription: The local customer's subscription to the operation.
- lock: The operation-wide lock.
- pool: A thread pool.
- termination_manager: The _interfaces.TerminationManager for the operation.
- transmission_manager: The _interfaces.TransmissionManager for the
- operation.
- expiration_manager: The _interfaces.ExpirationManager for the operation.
- """
- if subscription.kind is base.Subscription.Kind.FULL:
- awaited_protocol_receiver = _Awaited(
- _Awaited.Kind.ARRIVED, subscription.protocol_receiver)
- else:
- awaited_protocol_receiver = _ARRIVED_AND_NONE
- return _ProtocolManager(
- awaited_protocol_receiver, lock, pool, termination_manager,
- transmission_manager, expiration_manager)
-
-
-def service_protocol_manager(
- lock, pool, termination_manager, transmission_manager, expiration_manager):
- """Creates an _interfaces.ProtocolManager for service-side use.
-
- Args:
- lock: The operation-wide lock.
- pool: A thread pool.
- termination_manager: The _interfaces.TerminationManager for the operation.
- transmission_manager: The _interfaces.TransmissionManager for the
- operation.
- expiration_manager: The _interfaces.ExpirationManager for the operation.
- """
- return _ProtocolManager(
- _NOT_YET_ARRIVED, lock, pool, termination_manager, transmission_manager,
- expiration_manager)
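
_protocol.py models "a value that may not have arrived yet" (_Awaited) and "a value that passes through at most once" (_Transitory) by pairing a namedtuple with a nested enum of kinds. A standalone sketch of that state-tagging idiom (the names below are illustrative, not the removed module's):

import collections
import enum


class Awaited(collections.namedtuple('Awaited', ('kind', 'value',))):

  @enum.unique
  class Kind(enum.Enum):
    NOT_YET_ARRIVED = 'not yet arrived'
    ARRIVED = 'arrived'


NOT_YET_ARRIVED = Awaited(Awaited.Kind.NOT_YET_ARRIVED, None)

receiver = NOT_YET_ARRIVED
# ... later, once the customer's subscription becomes known:
receiver = Awaited(Awaited.Kind.ARRIVED, object())

if receiver.kind is Awaited.Kind.ARRIVED and receiver.value is not None:
  # Only now is it safe to deliver any buffered protocol object to the value.
  pass
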
diff --git a/src/python/grpcio/grpc/framework/core/_reception.py b/src/python/grpcio/grpc/framework/core/_reception.py
deleted file mode 100644
index ff81450dee..0000000000
--- a/src/python/grpcio/grpc/framework/core/_reception.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for ticket reception."""
-
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.base import utilities
-from grpc.framework.interfaces.links import links
-
-_REMOTE_TICKET_TERMINATION_TO_LOCAL_OUTCOME_KIND = {
- links.Ticket.Termination.CANCELLATION: base.Outcome.Kind.CANCELLED,
- links.Ticket.Termination.EXPIRATION: base.Outcome.Kind.EXPIRED,
- links.Ticket.Termination.SHUTDOWN: base.Outcome.Kind.REMOTE_SHUTDOWN,
- links.Ticket.Termination.RECEPTION_FAILURE:
- base.Outcome.Kind.RECEPTION_FAILURE,
- links.Ticket.Termination.TRANSMISSION_FAILURE:
- base.Outcome.Kind.TRANSMISSION_FAILURE,
- links.Ticket.Termination.LOCAL_FAILURE: base.Outcome.Kind.REMOTE_FAILURE,
- links.Ticket.Termination.REMOTE_FAILURE: base.Outcome.Kind.LOCAL_FAILURE,
-}
-
-_RECEPTION_FAILURE_OUTCOME = _utilities.Outcome(
- base.Outcome.Kind.RECEPTION_FAILURE, None, None)
-
-
-def _carrying_protocol_context(ticket):
- return ticket.protocol is not None and ticket.protocol.kind in (
- links.Protocol.Kind.INVOCATION_CONTEXT,
- links.Protocol.Kind.SERVICER_CONTEXT,)
-
-
-class ReceptionManager(_interfaces.ReceptionManager):
- """A ReceptionManager based around a _Receiver passed to it."""
-
- def __init__(
- self, termination_manager, transmission_manager, expiration_manager,
- protocol_manager, ingestion_manager):
- """Constructor.
-
- Args:
- termination_manager: The operation's _interfaces.TerminationManager.
- transmission_manager: The operation's _interfaces.TransmissionManager.
- expiration_manager: The operation's _interfaces.ExpirationManager.
- protocol_manager: The operation's _interfaces.ProtocolManager.
- ingestion_manager: The operation's _interfaces.IngestionManager.
- """
- self._termination_manager = termination_manager
- self._transmission_manager = transmission_manager
- self._expiration_manager = expiration_manager
- self._protocol_manager = protocol_manager
- self._ingestion_manager = ingestion_manager
-
- self._lowest_unseen_sequence_number = 0
- self._out_of_sequence_tickets = {}
- self._aborted = False
-
- def _abort(self, outcome):
- self._aborted = True
- if self._termination_manager.outcome is None:
- self._termination_manager.abort(outcome)
- self._transmission_manager.abort(None)
- self._expiration_manager.terminate()
-
- def _sequence_failure(self, ticket):
- """Determines a just-arrived ticket's sequential legitimacy.
-
- Args:
- ticket: A just-arrived ticket.
-
- Returns:
- True if the ticket fails the sequencing check (its sequence number has
- already been seen); False otherwise.
- """
- if ticket.sequence_number < self._lowest_unseen_sequence_number:
- return True
- elif ticket.sequence_number in self._out_of_sequence_tickets:
- return True
- else:
- return False
-
- def _process_one(self, ticket):
- if ticket.sequence_number == 0:
- self._ingestion_manager.set_group_and_method(ticket.group, ticket.method)
- if _carrying_protocol_context(ticket):
- self._protocol_manager.accept_protocol_context(ticket.protocol.value)
- else:
- self._protocol_manager.accept_protocol_context(None)
- if ticket.timeout is not None:
- self._expiration_manager.change_timeout(ticket.timeout)
- if ticket.termination is None:
- completion = None
- else:
- completion = utilities.completion(
- ticket.terminal_metadata, ticket.code, ticket.message)
- self._termination_manager.reception_complete(ticket.code, ticket.message)
- self._ingestion_manager.advance(
- ticket.initial_metadata, ticket.payload, completion, ticket.allowance)
- if ticket.allowance is not None:
- self._transmission_manager.allowance(ticket.allowance)
-
- def _process(self, ticket):
- """Process those tickets ready to be processed.
-
- Args:
- ticket: A just-arrived ticket whose sequence number matches this
- ReceptionManager's _lowest_unseen_sequence_number field.
- """
- while True:
- self._process_one(ticket)
- next_ticket = self._out_of_sequence_tickets.pop(
- ticket.sequence_number + 1, None)
- if next_ticket is None:
- self._lowest_unseen_sequence_number = ticket.sequence_number + 1
- return
- else:
- ticket = next_ticket
-
- def receive_ticket(self, ticket):
- """See _interfaces.ReceptionManager.receive_ticket for specification."""
- if self._aborted:
- return
- elif self._sequence_failure(ticket):
- self._abort(_RECEPTION_FAILURE_OUTCOME)
- elif ticket.termination not in (None, links.Ticket.Termination.COMPLETION):
- outcome_kind = _REMOTE_TICKET_TERMINATION_TO_LOCAL_OUTCOME_KIND[
- ticket.termination]
- self._abort(
- _utilities.Outcome(outcome_kind, ticket.code, ticket.message))
- elif ticket.sequence_number == self._lowest_unseen_sequence_number:
- self._process(ticket)
- else:
- self._out_of_sequence_tickets[ticket.sequence_number] = ticket
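
The core mechanism of the reception manager deleted above is sequence-number reordering: in-order tickets are processed immediately, later ones are parked keyed by sequence number and drained once the gap is filled. A standalone sketch of just that mechanism, under illustrative names (SequencedProcessor, receive) that are not part of the deleted API:

class SequencedProcessor(object):
    """Processes items in sequence-number order, parking early arrivals."""

    def __init__(self, process):
        self._process = process
        self._next_expected = 0
        self._parked = {}

    def receive(self, sequence_number, item):
        if sequence_number != self._next_expected:
            # Not the next expected item: park it until the gap is filled.
            self._parked[sequence_number] = item
            return
        while True:
            self._process(item)
            self._next_expected += 1
            item = self._parked.pop(self._next_expected, None)
            if item is None:
                return


if __name__ == '__main__':
    processor = SequencedProcessor(lambda item: print('processing', item))
    processor.receive(1, 'b')  # parked: item 0 has not arrived yet
    processor.receive(0, 'a')  # processes 'a' and then the parked 'b'
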
diff --git a/src/python/grpcio/grpc/framework/core/_termination.py b/src/python/grpcio/grpc/framework/core/_termination.py
deleted file mode 100644
index fff3a3fc14..0000000000
--- a/src/python/grpcio/grpc/framework/core/_termination.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for operation termination."""
-
-import abc
-
-import six
-
-from grpc.framework.core import _constants
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.foundation import callable_util
-from grpc.framework.interfaces.base import base
-
-
-def _invocation_completion_predicate(
- unused_emission_complete, unused_transmission_complete,
- unused_reception_complete, ingestion_complete):
- return ingestion_complete
-
-
-def _service_completion_predicate(
- unused_emission_complete, transmission_complete, unused_reception_complete,
- ingestion_complete):
- return transmission_complete and ingestion_complete
-
-
-class TerminationManager(six.with_metaclass(abc.ABCMeta, _interfaces.TerminationManager)):
- """A _interfaces.TransmissionManager on which another manager may be set."""
-
- @abc.abstractmethod
- def set_expiration_manager(self, expiration_manager):
- """Sets the expiration manager with which this manager will interact.
-
- Args:
- expiration_manager: The _interfaces.ExpirationManager associated with the
- current operation.
- """
- raise NotImplementedError()
-
-
-class _TerminationManager(TerminationManager):
- """An implementation of TerminationManager."""
-
- def __init__(self, predicate, action, pool):
- """Constructor.
-
- Args:
- predicate: One of _invocation_completion_predicate or
- _service_completion_predicate to be used to determine when the operation
- has completed.
- action: A behavior to pass the operation outcome's kind on operation
- termination.
- pool: A thread pool.
- """
- self._predicate = predicate
- self._action = action
- self._pool = pool
- self._expiration_manager = None
-
- self._callbacks = []
-
- self._code = None
- self._details = None
- self._emission_complete = False
- self._transmission_complete = False
- self._reception_complete = False
- self._ingestion_complete = False
-
- # The None-ness of outcome is the operation-wide record of whether and how
- # the operation has terminated.
- self.outcome = None
-
- def set_expiration_manager(self, expiration_manager):
- self._expiration_manager = expiration_manager
-
- def _terminate_internal_only(self, outcome):
- """Terminates the operation.
-
- Args:
- outcome: A base.Outcome describing the outcome of the operation.
- """
- self.outcome = outcome
- callbacks = list(self._callbacks)
- self._callbacks = None
-
- act = callable_util.with_exceptions_logged(
- self._action, _constants.INTERNAL_ERROR_LOG_MESSAGE)
-
- # TODO(issue 3202): Don't call the local application's callbacks if it has
- # previously shown a programming defect.
- if False and outcome.kind is base.Outcome.Kind.LOCAL_FAILURE:
- self._pool.submit(act, base.Outcome.Kind.LOCAL_FAILURE)
- else:
- def call_callbacks_and_act(callbacks, outcome):
- for callback in callbacks:
- callback_outcome = callable_util.call_logging_exceptions(
- callback, _constants.TERMINATION_CALLBACK_EXCEPTION_LOG_MESSAGE,
- outcome)
- if callback_outcome.exception is not None:
- act_outcome_kind = base.Outcome.Kind.LOCAL_FAILURE
- break
- else:
- act_outcome_kind = outcome.kind
- act(act_outcome_kind)
-
- self._pool.submit(
- callable_util.with_exceptions_logged(
- call_callbacks_and_act, _constants.INTERNAL_ERROR_LOG_MESSAGE),
- callbacks, outcome)
-
- def _terminate_and_notify(self, outcome):
- self._terminate_internal_only(outcome)
- self._expiration_manager.terminate()
-
- def _perhaps_complete(self):
- if self._predicate(
- self._emission_complete, self._transmission_complete,
- self._reception_complete, self._ingestion_complete):
- self._terminate_and_notify(
- _utilities.Outcome(
- base.Outcome.Kind.COMPLETED, self._code, self._details))
- return True
- else:
- return False
-
- def is_active(self):
- """See _interfaces.TerminationManager.is_active for specification."""
- return self.outcome is None
-
- def add_callback(self, callback):
- """See _interfaces.TerminationManager.add_callback for specification."""
- if self.outcome is None:
- self._callbacks.append(callback)
- return None
- else:
- return self.outcome
-
- def emission_complete(self):
- """See superclass method for specification."""
- if self.outcome is None:
- self._emission_complete = True
- self._perhaps_complete()
-
- def transmission_complete(self):
- """See superclass method for specification."""
- if self.outcome is None:
- self._transmission_complete = True
- return self._perhaps_complete()
- else:
- return False
-
- def reception_complete(self, code, details):
- """See superclass method for specification."""
- if self.outcome is None:
- self._reception_complete = True
- self._code = code
- self._details = details
- self._perhaps_complete()
-
- def ingestion_complete(self):
- """See superclass method for specification."""
- if self.outcome is None:
- self._ingestion_complete = True
- self._perhaps_complete()
-
- def expire(self):
- """See _interfaces.TerminationManager.expire for specification."""
- self._terminate_internal_only(
- _utilities.Outcome(base.Outcome.Kind.EXPIRED, None, None))
-
- def abort(self, outcome):
- """See _interfaces.TerminationManager.abort for specification."""
- self._terminate_and_notify(outcome)
-
-
-def invocation_termination_manager(action, pool):
- """Creates a TerminationManager appropriate for invocation-side use.
-
- Args:
- action: An action to call on operation termination.
- pool: A thread pool in which to execute the passed action and any
- termination callbacks that are registered during the operation.
-
- Returns:
- A TerminationManager appropriate for invocation-side use.
- """
- return _TerminationManager(_invocation_completion_predicate, action, pool)
-
-
-def service_termination_manager(action, pool):
- """Creates a TerminationManager appropriate for service-side use.
-
- Args:
- action: An action to call on operation termination.
- pool: A thread pool in which to execute the passed action and any
- termination callbacks that are registered during the operation.
-
- Returns:
- A TerminationManager appropriate for service-side use.
- """
- return _TerminationManager(_service_completion_predicate, action, pool)
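
The termination manager deleted above decides completion by feeding four independent completion flags (emission, transmission, reception, ingestion) into a side-specific predicate and firing the termination action once. A standalone sketch of that idea, assuming made-up names (OperationCompletion, mark) rather than the deleted API:

def invocation_done(emission, transmission, reception, ingestion):
    # Invocation side only waits for local ingestion of the response.
    return ingestion


def service_done(emission, transmission, reception, ingestion):
    # Service side additionally waits for its transmission to drain.
    return transmission and ingestion


class OperationCompletion(object):
    """Fires on_complete once the predicate is satisfied."""

    def __init__(self, predicate, on_complete):
        self._predicate = predicate
        self._on_complete = on_complete
        self._flags = {'emission': False, 'transmission': False,
                       'reception': False, 'ingestion': False}
        self._completed = False

    def mark(self, name):
        self._flags[name] = True
        if not self._completed and self._predicate(**self._flags):
            self._completed = True
            self._on_complete()


if __name__ == '__main__':
    completion = OperationCompletion(service_done, lambda: print('terminated'))
    completion.mark('ingestion')      # not yet: transmission still pending
    completion.mark('transmission')   # predicate satisfied -> 'terminated'
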
diff --git a/src/python/grpcio/grpc/framework/core/_transmission.py b/src/python/grpcio/grpc/framework/core/_transmission.py
deleted file mode 100644
index 65b12c4160..0000000000
--- a/src/python/grpcio/grpc/framework/core/_transmission.py
+++ /dev/null
@@ -1,335 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for ticket transmission during an operation."""
-
-import collections
-import enum
-
-from grpc.framework.core import _constants
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.foundation import callable_util
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.links import links
-
-_TRANSMISSION_EXCEPTION_LOG_MESSAGE = 'Exception during transmission!'
-
-_TRANSMISSION_FAILURE_OUTCOME = _utilities.Outcome(
- base.Outcome.Kind.TRANSMISSION_FAILURE, None, None)
-
-
-def _explode_completion(completion):
- if completion is None:
- return None, None, None, None
- else:
- return (
- completion.terminal_metadata, completion.code, completion.message,
- links.Ticket.Termination.COMPLETION)
-
-
-class _Abort(
- collections.namedtuple(
- '_Abort', ('kind', 'termination', 'code', 'details',))):
- """Tracks whether the operation aborted and what is to be done about it.
-
- Attributes:
- kind: A Kind value describing the overall kind of the _Abort.
- termination: A links.Ticket.Termination value to be sent to the other side
- of the operation. Only valid if kind is Kind.ABORTED_NOTIFY_NEEDED.
- code: A code value to be sent to the other side of the operation. Only
- valid if kind is Kind.ABORTED_NOTIFY_NEEDED.
- details: A details value to be sent to the other side of the operation.
- Only valid if kind is Kind.ABORTED_NOTIFY_NEEDED.
- """
-
- @enum.unique
- class Kind(enum.Enum):
- NOT_ABORTED = 'not aborted'
- ABORTED_NOTIFY_NEEDED = 'aborted notify needed'
- ABORTED_NO_NOTIFY = 'aborted no notify'
-
-_NOT_ABORTED = _Abort(_Abort.Kind.NOT_ABORTED, None, None, None)
-_ABORTED_NO_NOTIFY = _Abort(_Abort.Kind.ABORTED_NO_NOTIFY, None, None, None)
-
-
-class TransmissionManager(_interfaces.TransmissionManager):
- """An _interfaces.TransmissionManager that sends links.Tickets."""
-
- def __init__(
- self, operation_id, ticket_sink, lock, pool, termination_manager):
- """Constructor.
-
- Args:
- operation_id: The operation's ID.
- ticket_sink: A callable that accepts tickets and sends them to the other
- side of the operation.
- lock: The operation-servicing-wide lock object.
- pool: A thread pool in which the work of transmitting tickets will be
- performed.
- termination_manager: The _interfaces.TerminationManager associated with
- this operation.
- """
- self._lock = lock
- self._pool = pool
- self._ticket_sink = ticket_sink
- self._operation_id = operation_id
- self._termination_manager = termination_manager
- self._expiration_manager = None
-
- self._lowest_unused_sequence_number = 0
- self._remote_allowance = 1
- self._remote_complete = False
- self._timeout = None
- self._local_allowance = 0
- self._initial_metadata = None
- self._payloads = []
- self._completion = None
- self._abort = _NOT_ABORTED
- self._transmitting = False
-
- def set_expiration_manager(self, expiration_manager):
- """Sets the ExpirationManager with which this manager will cooperate."""
- self._expiration_manager = expiration_manager
-
- def _next_ticket(self):
- """Creates the next ticket to be transmitted.
-
- Returns:
- A links.Ticket to be sent to the other side of the operation or None if
- there is nothing to be sent at this time.
- """
- if self._abort.kind is _Abort.Kind.ABORTED_NO_NOTIFY:
- return None
- elif self._abort.kind is _Abort.Kind.ABORTED_NOTIFY_NEEDED:
- termination = self._abort.termination
- code, details = self._abort.code, self._abort.details
- self._abort = _ABORTED_NO_NOTIFY
- return links.Ticket(
- self._operation_id, self._lowest_unused_sequence_number, None, None,
- None, None, None, None, None, None, code, details, termination, None)
-
- action = False
- # TODO(nathaniel): Support other subscriptions.
- local_subscription = links.Ticket.Subscription.FULL
- timeout = self._timeout
- if timeout is not None:
- self._timeout = None
- action = True
- if self._local_allowance <= 0:
- allowance = None
- else:
- allowance = self._local_allowance
- self._local_allowance = 0
- action = True
- initial_metadata = self._initial_metadata
- if initial_metadata is not None:
- self._initial_metadata = None
- action = True
- if not self._payloads or self._remote_allowance <= 0:
- payload = None
- else:
- payload = self._payloads.pop(0)
- self._remote_allowance -= 1
- action = True
- if self._completion is None or self._payloads:
- terminal_metadata, code, message, termination = None, None, None, None
- else:
- terminal_metadata, code, message, termination = _explode_completion(
- self._completion)
- self._completion = None
- action = True
-
- if action:
- ticket = links.Ticket(
- self._operation_id, self._lowest_unused_sequence_number, None, None,
- local_subscription, timeout, allowance, initial_metadata, payload,
- terminal_metadata, code, message, termination, None)
- self._lowest_unused_sequence_number += 1
- return ticket
- else:
- return None
-
- def _transmit(self, ticket):
- """Commences the transmission loop sending tickets.
-
- Args:
- ticket: A links.Ticket to be sent to the other side of the operation.
- """
- def transmit(ticket):
- while True:
- transmission_outcome = callable_util.call_logging_exceptions(
- self._ticket_sink, _TRANSMISSION_EXCEPTION_LOG_MESSAGE, ticket)
- if transmission_outcome.exception is None:
- with self._lock:
- if ticket.termination is links.Ticket.Termination.COMPLETION:
- self._termination_manager.transmission_complete()
- ticket = self._next_ticket()
- if ticket is None:
- self._transmitting = False
- return
- else:
- with self._lock:
- self._abort = _ABORTED_NO_NOTIFY
- if self._termination_manager.outcome is None:
- self._termination_manager.abort(_TRANSMISSION_FAILURE_OUTCOME)
- self._expiration_manager.terminate()
- return
-
- self._pool.submit(callable_util.with_exceptions_logged(
- transmit, _constants.INTERNAL_ERROR_LOG_MESSAGE), ticket)
- self._transmitting = True
-
- def kick_off(
- self, group, method, timeout, protocol_options, initial_metadata,
- payload, completion, allowance):
- """See _interfaces.TransmissionManager.kickoff for specification."""
- # TODO(nathaniel): Support other subscriptions.
- subscription = links.Ticket.Subscription.FULL
- terminal_metadata, code, message, termination = _explode_completion(
- completion)
- self._remote_allowance = 1 if payload is None else 0
- protocol = links.Protocol(links.Protocol.Kind.CALL_OPTION, protocol_options)
- ticket = links.Ticket(
- self._operation_id, 0, group, method, subscription, timeout, allowance,
- initial_metadata, payload, terminal_metadata, code, message,
- termination, protocol)
- self._lowest_unused_sequence_number = 1
- self._transmit(ticket)
-
- def advance(self, initial_metadata, payload, completion, allowance):
- """See _interfaces.TransmissionManager.advance for specification."""
- if self._abort.kind is not _Abort.Kind.NOT_ABORTED:
- return
-
- effective_initial_metadata = initial_metadata
- effective_payload = payload
- effective_completion = completion
- if allowance is not None and not self._remote_complete:
- effective_allowance = allowance
- else:
- effective_allowance = None
- if self._transmitting:
- if effective_initial_metadata is not None:
- self._initial_metadata = effective_initial_metadata
- if effective_payload is not None:
- self._payloads.append(effective_payload)
- if effective_completion is not None:
- self._completion = effective_completion
- if effective_allowance is not None:
- self._local_allowance += effective_allowance
- else:
- if effective_payload is not None:
- if 0 < self._remote_allowance:
- ticket_payload = effective_payload
- self._remote_allowance -= 1
- else:
- self._payloads.append(effective_payload)
- ticket_payload = None
- else:
- ticket_payload = None
- if effective_completion is not None and not self._payloads:
- ticket_completion = effective_completion
- else:
- self._completion = effective_completion
- ticket_completion = None
- if any(
- (effective_initial_metadata, ticket_payload, ticket_completion,
- effective_allowance)):
- terminal_metadata, code, message, termination = _explode_completion(
- completion)
- ticket = links.Ticket(
- self._operation_id, self._lowest_unused_sequence_number, None, None,
- None, None, allowance, effective_initial_metadata, ticket_payload,
- terminal_metadata, code, message, termination, None)
- self._lowest_unused_sequence_number += 1
- self._transmit(ticket)
-
- def timeout(self, timeout):
- """See _interfaces.TransmissionManager.timeout for specification."""
- if self._abort.kind is not _Abort.Kind.NOT_ABORTED:
- return
- elif self._transmitting:
- self._timeout = timeout
- else:
- ticket = links.Ticket(
- self._operation_id, self._lowest_unused_sequence_number, None, None,
- None, timeout, None, None, None, None, None, None, None, None)
- self._lowest_unused_sequence_number += 1
- self._transmit(ticket)
-
- def allowance(self, allowance):
- """See _interfaces.TransmissionManager.allowance for specification."""
- if self._abort.kind is not _Abort.Kind.NOT_ABORTED:
- return
- elif self._transmitting or not self._payloads:
- self._remote_allowance += allowance
- else:
- self._remote_allowance += allowance - 1
- payload = self._payloads.pop(0)
- if self._payloads:
- completion = None
- else:
- completion = self._completion
- self._completion = None
- terminal_metadata, code, message, termination = _explode_completion(
- completion)
- ticket = links.Ticket(
- self._operation_id, self._lowest_unused_sequence_number, None, None,
- None, None, None, None, payload, terminal_metadata, code, message,
- termination, None)
- self._lowest_unused_sequence_number += 1
- self._transmit(ticket)
-
- def remote_complete(self):
- """See _interfaces.TransmissionManager.remote_complete for specification."""
- self._remote_complete = True
- self._local_allowance = 0
-
- def abort(self, outcome):
- """See _interfaces.TransmissionManager.abort for specification."""
- if self._abort.kind is _Abort.Kind.NOT_ABORTED:
- if outcome is None:
- self._abort = _ABORTED_NO_NOTIFY
- else:
- termination = _constants.ABORTION_OUTCOME_TO_TICKET_TERMINATION.get(
- outcome.kind)
- if termination is None:
- self._abort = _ABORTED_NO_NOTIFY
- elif self._transmitting:
- self._abort = _Abort(
- _Abort.Kind.ABORTED_NOTIFY_NEEDED, termination, outcome.code,
- outcome.details)
- else:
- ticket = links.Ticket(
- self._operation_id, self._lowest_unused_sequence_number, None,
- None, None, None, None, None, None, None, outcome.code,
- outcome.details, termination, None)
- self._transmit(ticket)
- self._abort = _ABORTED_NO_NOTIFY
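
A defining behaviour of the transmission manager deleted above is coalescing: while a transmission is already in flight, metadata, payloads, completion, and allowance updates are buffered, and the next ticket carries whatever has accumulated, with payload emission gated by the remote allowance. The sketch below reduces that to a small standalone class; the names (PendingTicketState, next_ticket) and the dict-shaped "ticket" are illustrative, not the deleted links.Ticket API.

class PendingTicketState(object):
    """Buffers outbound state and drains it one coalesced ticket at a time."""

    def __init__(self):
        self._initial_metadata = None
        self._payloads = []
        self._completion = None
        self._remote_allowance = 1

    def buffer(self, initial_metadata=None, payload=None, completion=None):
        if initial_metadata is not None:
            self._initial_metadata = initial_metadata
        if payload is not None:
            self._payloads.append(payload)
        if completion is not None:
            self._completion = completion

    def grant_allowance(self, allowance):
        self._remote_allowance += allowance

    def next_ticket(self):
        """Returns a dict describing the next transmission, or None if idle."""
        ticket = {}
        if self._initial_metadata is not None:
            ticket['initial_metadata'] = self._initial_metadata
            self._initial_metadata = None
        if self._payloads and self._remote_allowance > 0:
            ticket['payload'] = self._payloads.pop(0)
            self._remote_allowance -= 1
        if self._completion is not None and not self._payloads:
            # Completion only rides along once every payload has been sent.
            ticket['completion'] = self._completion
            self._completion = None
        return ticket or None


if __name__ == '__main__':
    state = PendingTicketState()
    state.buffer(initial_metadata={'k': 'v'}, payload=b'one')
    state.buffer(payload=b'two', completion='COMPLETION')
    print(state.next_ticket())  # metadata + first payload (allowance is 1)
    state.grant_allowance(1)
    print(state.next_ticket())  # second payload + completion
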
diff --git a/src/python/grpcio/grpc/framework/core/_utilities.py b/src/python/grpcio/grpc/framework/core/_utilities.py
deleted file mode 100644
index abedc727e4..0000000000
--- a/src/python/grpcio/grpc/framework/core/_utilities.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Package-internal utilities."""
-
-import collections
-
-from grpc.framework.interfaces.base import base
-
-
-class ServicerPackage(
- collections.namedtuple(
- 'ServicerPackage', ('servicer', 'default_timeout', 'maximum_timeout'))):
- """A trivial bundle class.
-
- Attributes:
- servicer: A base.Servicer.
- default_timeout: A float indicating the length of time in seconds to allow
- for an operation invoked without a timeout.
- maximum_timeout: A float indicating the maximum length of time in seconds to
- allow for an operation.
- """
-
-
-class Outcome(
- base.Outcome,
- collections.namedtuple('Outcome', ('kind', 'code', 'details',))):
- """A trivial implementation of base.Outcome."""
diff --git a/src/python/grpcio/grpc/framework/core/implementations.py b/src/python/grpcio/grpc/framework/core/implementations.py
deleted file mode 100644
index 364a7faed4..0000000000
--- a/src/python/grpcio/grpc/framework/core/implementations.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Entry points into the ticket-exchange-based base layer implementation."""
-
-# base and links are referenced from specification in this module.
-from grpc.framework.core import _end
-from grpc.framework.interfaces.base import base # pylint: disable=unused-import
-from grpc.framework.interfaces.links import links # pylint: disable=unused-import
-
-
-def invocation_end_link():
- """Creates a base.End-links.Link suitable for operation invocation.
-
- Returns:
- An object that is both a base.End and a links.Link, that supports operation
- invocation, and that translates operation invocation into ticket exchange.
- """
- return _end.serviceless_end_link()
-
-
-def service_end_link(servicer, default_timeout, maximum_timeout):
- """Creates a base.End-links.Link suitable for operation service.
-
- Args:
- servicer: A base.Servicer for servicing operations.
- default_timeout: A length of time in seconds to be used as the default
- time allotted for a single operation.
- maximum_timeout: A length of time in seconds to be used as the maximum
- time allotted for a single operation.
-
- Returns:
- An object that is both a base.End and a links.Link and that services
- operations that arrive at it through ticket exchange.
- """
- return _end.serviceful_end_link(servicer, default_timeout, maximum_timeout)
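
The docstrings in the deleted implementations.py describe objects that are simultaneously a base.End (application-facing) and a links.Link (ticket-facing). A standalone sketch of that dual-interface design choice, under illustrative stand-in names (ApplicationFacing, WireFacing, EndLink) that are not the deleted interfaces:

class ApplicationFacing(object):
    def operate(self, request):
        raise NotImplementedError()


class WireFacing(object):
    def accept_ticket(self, ticket):
        raise NotImplementedError()


class EndLink(ApplicationFacing, WireFacing):
    """Plays both roles: operations in, tickets out (and back again)."""

    def __init__(self, ticket_sink):
        self._ticket_sink = ticket_sink

    def operate(self, request):
        # Application-side call translated into a wire-level ticket.
        self._ticket_sink({'sequence_number': 0, 'payload': request})

    def accept_ticket(self, ticket):
        # Wire-level ticket translated back for the application side.
        print('received payload:', ticket['payload'])


if __name__ == '__main__':
    received = []
    end_link = EndLink(received.append)
    end_link.operate('hello')
    end_link.accept_ticket(received[0])
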
diff --git a/src/python/grpcio/grpc/framework/crust/__init__.py b/src/python/grpcio/grpc/framework/crust/__init__.py
deleted file mode 100644
index 7086519106..0000000000
--- a/src/python/grpcio/grpc/framework/crust/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio/grpc/framework/crust/_calls.py b/src/python/grpcio/grpc/framework/crust/_calls.py
deleted file mode 100644
index bff940d747..0000000000
--- a/src/python/grpcio/grpc/framework/crust/_calls.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Utility functions for invoking RPCs."""
-
-from grpc.framework.crust import _control
-from grpc.framework.interfaces.base import utilities
-from grpc.framework.interfaces.face import face
-
-_ITERATOR_EXCEPTION_LOG_MESSAGE = 'Exception iterating over requests!'
-
-_EMPTY_COMPLETION = utilities.completion(None, None, None)
-
-
-def _invoke(
- end, group, method, timeout, protocol_options, initial_metadata, payload,
- complete):
- rendezvous = _control.Rendezvous(None, None)
- subscription = utilities.full_subscription(
- rendezvous, _control.protocol_receiver(rendezvous))
- operation_context, operator = end.operate(
- group, method, subscription, timeout, protocol_options=protocol_options,
- initial_metadata=initial_metadata, payload=payload,
- completion=_EMPTY_COMPLETION if complete else None)
- rendezvous.set_operator_and_context(operator, operation_context)
- outcome = operation_context.add_termination_callback(rendezvous.set_outcome)
- if outcome is not None:
- rendezvous.set_outcome(outcome)
- return rendezvous, operation_context, outcome
-
-
-def _event_return_unary(
- receiver, abortion_callback, rendezvous, operation_context, outcome, pool):
- if outcome is None:
- def in_pool():
- abortion = rendezvous.add_abortion_callback(abortion_callback)
- if abortion is None:
- try:
- receiver.initial_metadata(rendezvous.initial_metadata())
- receiver.response(next(rendezvous))
- receiver.complete(
- rendezvous.terminal_metadata(), rendezvous.code(),
- rendezvous.details())
- except face.AbortionError:
- pass
- else:
- abortion_callback(abortion)
- pool.submit(_control.pool_wrap(in_pool, operation_context))
- return rendezvous
-
-
-def _event_return_stream(
- receiver, abortion_callback, rendezvous, operation_context, outcome, pool):
- if outcome is None:
- def in_pool():
- abortion = rendezvous.add_abortion_callback(abortion_callback)
- if abortion is None:
- try:
- receiver.initial_metadata(rendezvous.initial_metadata())
- for response in rendezvous:
- receiver.response(response)
- receiver.complete(
- rendezvous.terminal_metadata(), rendezvous.code(),
- rendezvous.details())
- except face.AbortionError:
- pass
- else:
- abortion_callback(abortion)
- pool.submit(_control.pool_wrap(in_pool, operation_context))
- return rendezvous
-
-
-def blocking_unary_unary(
- end, group, method, timeout, with_call, protocol_options, initial_metadata,
- payload):
- """Services in a blocking fashion a unary-unary servicer method."""
- rendezvous, unused_operation_context, unused_outcome = _invoke(
- end, group, method, timeout, protocol_options, initial_metadata, payload,
- True)
- if with_call:
- return next(rendezvous), rendezvous
- else:
- return next(rendezvous)
-
-
-def future_unary_unary(
- end, group, method, timeout, protocol_options, initial_metadata, payload):
- """Services a value-in value-out servicer method by returning a Future."""
- rendezvous, unused_operation_context, unused_outcome = _invoke(
- end, group, method, timeout, protocol_options, initial_metadata, payload,
- True)
- return rendezvous
-
-
-def inline_unary_stream(
- end, group, method, timeout, protocol_options, initial_metadata, payload):
- """Services a value-in stream-out servicer method."""
- rendezvous, unused_operation_context, unused_outcome = _invoke(
- end, group, method, timeout, protocol_options, initial_metadata, payload,
- True)
- return rendezvous
-
-
-def blocking_stream_unary(
- end, group, method, timeout, with_call, protocol_options, initial_metadata,
- payload_iterator, pool):
- """Services in a blocking fashion a stream-in value-out servicer method."""
- rendezvous, operation_context, outcome = _invoke(
- end, group, method, timeout, protocol_options, initial_metadata, None,
- False)
- if outcome is None:
- def in_pool():
- for payload in payload_iterator:
- rendezvous.consume(payload)
- rendezvous.terminate()
- pool.submit(_control.pool_wrap(in_pool, operation_context))
- if with_call:
- return next(rendezvous), rendezvous
- else:
- return next(rendezvous)
- else:
- if with_call:
- return next(rendezvous), rendezvous
- else:
- return next(rendezvous)
-
-
-def future_stream_unary(
- end, group, method, timeout, protocol_options, initial_metadata,
- payload_iterator, pool):
- """Services a stream-in value-out servicer method by returning a Future."""
- rendezvous, operation_context, outcome = _invoke(
- end, group, method, timeout, protocol_options, initial_metadata, None,
- False)
- if outcome is None:
- def in_pool():
- for payload in payload_iterator:
- rendezvous.consume(payload)
- rendezvous.terminate()
- pool.submit(_control.pool_wrap(in_pool, operation_context))
- return rendezvous
-
-
-def inline_stream_stream(
- end, group, method, timeout, protocol_options, initial_metadata,
- payload_iterator, pool):
- """Services a stream-in stream-out servicer method."""
- rendezvous, operation_context, outcome = _invoke(
- end, group, method, timeout, protocol_options, initial_metadata, None,
- False)
- if outcome is None:
- def in_pool():
- for payload in payload_iterator:
- rendezvous.consume(payload)
- rendezvous.terminate()
- pool.submit(_control.pool_wrap(in_pool, operation_context))
- return rendezvous
-
-
-def event_unary_unary(
- end, group, method, timeout, protocol_options, initial_metadata, payload,
- receiver, abortion_callback, pool):
- rendezvous, operation_context, outcome = _invoke(
- end, group, method, timeout, protocol_options, initial_metadata, payload,
- True)
- return _event_return_unary(
- receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
-
-
-def event_unary_stream(
- end, group, method, timeout, protocol_options, initial_metadata, payload,
- receiver, abortion_callback, pool):
- rendezvous, operation_context, outcome = _invoke(
- end, group, method, timeout, protocol_options, initial_metadata, payload,
- True)
- return _event_return_stream(
- receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
-
-
-def event_stream_unary(
- end, group, method, timeout, protocol_options, initial_metadata, receiver,
- abortion_callback, pool):
- rendezvous, operation_context, outcome = _invoke(
- end, group, method, timeout, protocol_options, initial_metadata, None,
- False)
- return _event_return_unary(
- receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
-
-
-def event_stream_stream(
- end, group, method, timeout, protocol_options, initial_metadata, receiver,
- abortion_callback, pool):
- rendezvous, operation_context, outcome = _invoke(
- end, group, method, timeout, protocol_options, initial_metadata, None,
- False)
- return _event_return_stream(
- receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
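
The stream-in helpers deleted above all follow one shape: the request iterator is drained on a pool thread while the calling thread blocks on (or is handed a future for) the response. A standalone sketch of that shape, with concurrent.futures standing in for the framework's pool and a Queue standing in for the Rendezvous plumbing; the function and handler names here are assumptions, not the deleted API:

import queue
from concurrent import futures


def blocking_stream_unary(request_iterator, handler, pool):
    response_queue = queue.Queue()

    def feed_requests():
        # Consume every request on a worker thread, then produce the response.
        response_queue.put(handler(list(request_iterator)))

    pool.submit(feed_requests)
    return response_queue.get()  # caller blocks here until the response lands


if __name__ == '__main__':
    with futures.ThreadPoolExecutor(max_workers=1) as pool:
        total = blocking_stream_unary(iter([1, 2, 3]), sum, pool)
        print(total)  # 6
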
diff --git a/src/python/grpcio/grpc/framework/crust/_control.py b/src/python/grpcio/grpc/framework/crust/_control.py
deleted file mode 100644
index 9b4167bda0..0000000000
--- a/src/python/grpcio/grpc/framework/crust/_control.py
+++ /dev/null
@@ -1,584 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for translating between sync and async control flow."""
-
-import collections
-import enum
-import sys
-import threading
-import time
-
-from grpc.framework.foundation import abandonment
-from grpc.framework.foundation import callable_util
-from grpc.framework.foundation import future
-from grpc.framework.foundation import stream
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.base import utilities
-from grpc.framework.interfaces.face import face
-
-_DONE_CALLBACK_LOG_MESSAGE = 'Exception calling Future "done" callback!'
-_INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Crust) Internal Error! )-:'
-
-_CANNOT_SET_INITIAL_METADATA = (
- 'Could not set initial metadata - has it already been set, or has a ' +
- 'payload already been sent?')
-_CANNOT_SET_TERMINAL_METADATA = (
- 'Could not set terminal metadata - has it already been set, or has RPC ' +
- 'completion already been indicated?')
-_CANNOT_SET_CODE = (
- 'Could not set code - has it already been set, or has RPC completion ' +
- 'already been indicated?')
-_CANNOT_SET_DETAILS = (
- 'Could not set details - has it already been set, or has RPC completion ' +
- 'already been indicated?')
-
-
-class _DummyOperator(base.Operator):
-
- def advance(
- self, initial_metadata=None, payload=None, completion=None,
- allowance=None):
- pass
-
-_DUMMY_OPERATOR = _DummyOperator()
-
-
-class _Awaited(
- collections.namedtuple('_Awaited', ('kind', 'value',))):
-
- @enum.unique
- class Kind(enum.Enum):
- NOT_YET_ARRIVED = 'not yet arrived'
- ARRIVED = 'arrived'
-
-_NOT_YET_ARRIVED = _Awaited(_Awaited.Kind.NOT_YET_ARRIVED, None)
-_ARRIVED_AND_NONE = _Awaited(_Awaited.Kind.ARRIVED, None)
-
-
-class _Transitory(
- collections.namedtuple('_Transitory', ('kind', 'value',))):
-
- @enum.unique
- class Kind(enum.Enum):
- NOT_YET_SEEN = 'not yet seen'
- PRESENT = 'present'
- GONE = 'gone'
-
-_NOT_YET_SEEN = _Transitory(_Transitory.Kind.NOT_YET_SEEN, None)
-_GONE = _Transitory(_Transitory.Kind.GONE, None)
-
-
-class _Termination(
- collections.namedtuple(
- '_Termination', ('terminated', 'abortion', 'abortion_error',))):
- """Values indicating whether and how an RPC has terminated.
-
- Attributes:
- terminated: A boolean indicating whether or not the RPC has terminated.
- abortion: A face.Abortion value describing the RPC's abortion or None if the
- RPC did not abort.
- abortion_error: A face.AbortionError describing the RPC's abortion or None
- if the RPC did not abort.
- """
-
-_NOT_TERMINATED = _Termination(False, None, None)
-
-_OPERATION_OUTCOME_KIND_TO_TERMINATION_CONSTRUCTOR = {
- base.Outcome.Kind.COMPLETED: lambda *unused_args: _Termination(
- True, None, None),
- base.Outcome.Kind.CANCELLED: lambda *args: _Termination(
- True, face.Abortion(face.Abortion.Kind.CANCELLED, *args),
- face.CancellationError(*args)),
- base.Outcome.Kind.EXPIRED: lambda *args: _Termination(
- True, face.Abortion(face.Abortion.Kind.EXPIRED, *args),
- face.ExpirationError(*args)),
- base.Outcome.Kind.LOCAL_SHUTDOWN: lambda *args: _Termination(
- True, face.Abortion(face.Abortion.Kind.LOCAL_SHUTDOWN, *args),
- face.LocalShutdownError(*args)),
- base.Outcome.Kind.REMOTE_SHUTDOWN: lambda *args: _Termination(
- True, face.Abortion(face.Abortion.Kind.REMOTE_SHUTDOWN, *args),
- face.RemoteShutdownError(*args)),
- base.Outcome.Kind.RECEPTION_FAILURE: lambda *args: _Termination(
- True, face.Abortion(face.Abortion.Kind.NETWORK_FAILURE, *args),
- face.NetworkError(*args)),
- base.Outcome.Kind.TRANSMISSION_FAILURE: lambda *args: _Termination(
- True, face.Abortion(face.Abortion.Kind.NETWORK_FAILURE, *args),
- face.NetworkError(*args)),
- base.Outcome.Kind.LOCAL_FAILURE: lambda *args: _Termination(
- True, face.Abortion(face.Abortion.Kind.LOCAL_FAILURE, *args),
- face.LocalError(*args)),
- base.Outcome.Kind.REMOTE_FAILURE: lambda *args: _Termination(
- True, face.Abortion(face.Abortion.Kind.REMOTE_FAILURE, *args),
- face.RemoteError(*args)),
-}
-
-
-def _wait_once_until(condition, until):
- if until is None:
- condition.wait()
- else:
- remaining = until - time.time()
- if remaining < 0:
- raise future.TimeoutError()
- else:
- condition.wait(timeout=remaining)
-
-
-def _done_callback_as_operation_termination_callback(
- done_callback, rendezvous):
- def operation_termination_callback(operation_outcome):
- rendezvous.set_outcome(operation_outcome)
- done_callback(rendezvous)
- return operation_termination_callback
-
-
-def _abortion_callback_as_operation_termination_callback(
- rpc_abortion_callback, rendezvous_set_outcome):
- def operation_termination_callback(operation_outcome):
- termination = rendezvous_set_outcome(operation_outcome)
- if termination.abortion is not None:
- rpc_abortion_callback(termination.abortion)
- return operation_termination_callback
-
-
-class Rendezvous(base.Operator, future.Future, stream.Consumer, face.Call):
- """A rendez-vous for the threads of an operation.
-
- Instances of this object present iterator and stream.Consumer interfaces for
- interacting with application code and present a base.Operator interface and
- maintain a base.Operator internally for interacting with base interface code.
- """
-
- def __init__(self, operator, operation_context):
- self._condition = threading.Condition()
-
- self._operator = operator
- self._operation_context = operation_context
-
- self._protocol_context = _NOT_YET_ARRIVED
-
- self._up_initial_metadata = _NOT_YET_ARRIVED
- self._up_payload = None
- self._up_allowance = 1
- self._up_completion = _NOT_YET_ARRIVED
- self._down_initial_metadata = _NOT_YET_SEEN
- self._down_payload = None
- self._down_allowance = 1
- self._down_terminal_metadata = _NOT_YET_SEEN
- self._down_code = _NOT_YET_SEEN
- self._down_details = _NOT_YET_SEEN
-
- self._termination = _NOT_TERMINATED
-
- # The semantics of future.Future.cancel and future.Future.cancelled are
- # slightly wonky, so they have to be tracked separately from the rest of the
- # result of the RPC. This field tracks whether cancellation was requested
- # prior to termination of the RPC.
- self._cancelled = False
-
- def set_operator_and_context(self, operator, operation_context):
- with self._condition:
- self._operator = operator
- self._operation_context = operation_context
-
- def _down_completion(self):
- if self._down_terminal_metadata.kind is _Transitory.Kind.NOT_YET_SEEN:
- terminal_metadata = None
- self._down_terminal_metadata = _GONE
- elif self._down_terminal_metadata.kind is _Transitory.Kind.PRESENT:
- terminal_metadata = self._down_terminal_metadata.value
- self._down_terminal_metadata = _GONE
- else:
- terminal_metadata = None
- if self._down_code.kind is _Transitory.Kind.NOT_YET_SEEN:
- code = None
- self._down_code = _GONE
- elif self._down_code.kind is _Transitory.Kind.PRESENT:
- code = self._down_code.value
- self._down_code = _GONE
- else:
- code = None
- if self._down_details.kind is _Transitory.Kind.NOT_YET_SEEN:
- details = None
- self._down_details = _GONE
- elif self._down_details.kind is _Transitory.Kind.PRESENT:
- details = self._down_details.value
- self._down_details = _GONE
- else:
- details = None
- return utilities.completion(terminal_metadata, code, details)
-
- def _set_outcome(self, outcome):
- if not self._termination.terminated:
- self._operator = _DUMMY_OPERATOR
- self._operation_context = None
- self._down_initial_metadata = _GONE
- self._down_payload = None
- self._down_terminal_metadata = _GONE
- self._down_code = _GONE
- self._down_details = _GONE
-
- if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED:
- initial_metadata = None
- else:
- initial_metadata = self._up_initial_metadata.value
- if self._up_completion.kind is _Awaited.Kind.NOT_YET_ARRIVED:
- terminal_metadata = None
- else:
- terminal_metadata = self._up_completion.value.terminal_metadata
- if outcome.kind is base.Outcome.Kind.COMPLETED:
- code = self._up_completion.value.code
- details = self._up_completion.value.message
- else:
- code = outcome.code
- details = outcome.details
- self._termination = _OPERATION_OUTCOME_KIND_TO_TERMINATION_CONSTRUCTOR[
- outcome.kind](initial_metadata, terminal_metadata, code, details)
-
- self._condition.notify_all()
-
- return self._termination
-
- def advance(
- self, initial_metadata=None, payload=None, completion=None,
- allowance=None):
- with self._condition:
- if initial_metadata is not None:
- self._up_initial_metadata = _Awaited(
- _Awaited.Kind.ARRIVED, initial_metadata)
- if payload is not None:
- if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED:
- self._up_initial_metadata = _ARRIVED_AND_NONE
- self._up_payload = payload
- self._up_allowance -= 1
- if completion is not None:
- if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED:
- self._up_initial_metadata = _ARRIVED_AND_NONE
- self._up_completion = _Awaited(
- _Awaited.Kind.ARRIVED, completion)
- if allowance is not None:
- if self._down_payload is not None:
- self._operator.advance(payload=self._down_payload)
- self._down_payload = None
- self._down_allowance += allowance - 1
- else:
- self._down_allowance += allowance
- self._condition.notify_all()
-
- def cancel(self):
- with self._condition:
- if self._operation_context is not None:
- self._operation_context.cancel()
- self._cancelled = True
- return False
-
- def cancelled(self):
- with self._condition:
- return self._cancelled
-
- def running(self):
- with self._condition:
- return not self._termination.terminated
-
- def done(self):
- with self._condition:
- return self._termination.terminated
-
- def result(self, timeout=None):
- until = None if timeout is None else time.time() + timeout
- with self._condition:
- while True:
- if self._termination.terminated:
- if self._termination.abortion is None:
- return self._up_payload
- elif self._termination.abortion.kind is face.Abortion.Kind.CANCELLED:
- raise future.CancelledError()
- else:
- raise self._termination.abortion_error # pylint: disable=raising-bad-type
- else:
- _wait_once_until(self._condition, until)
-
- def exception(self, timeout=None):
- until = None if timeout is None else time.time() + timeout
- with self._condition:
- while True:
- if self._termination.terminated:
- if self._termination.abortion is None:
- return None
- else:
- return self._termination.abortion_error
- else:
- _wait_once_until(self._condition, until)
-
- def traceback(self, timeout=None):
- until = None if timeout is None else time.time() + timeout
- with self._condition:
- while True:
- if self._termination.terminated:
- if self._termination.abortion_error is None:
- return None
- else:
- abortion_error = self._termination.abortion_error
- break
- else:
- _wait_once_until(self._condition, until)
-
- try:
- raise abortion_error
- except face.AbortionError:
- return sys.exc_info()[2]
-
- def add_done_callback(self, fn):
- with self._condition:
- if self._operation_context is not None:
- outcome = self._operation_context.add_termination_callback(
- _done_callback_as_operation_termination_callback(fn, self))
- if outcome is None:
- return
- else:
- self._set_outcome(outcome)
-
- fn(self)
-
- def consume(self, value):
- with self._condition:
- while True:
- if self._termination.terminated:
- return
- elif 0 < self._down_allowance:
- self._operator.advance(payload=value)
- self._down_allowance -= 1
- return
- else:
- self._condition.wait()
-
- def terminate(self):
- with self._condition:
- if self._termination.terminated:
- return
- elif self._down_code.kind is _Transitory.Kind.GONE:
- # Conform to specified idempotence of terminate by ignoring extra calls.
- return
- else:
- completion = self._down_completion()
- self._operator.advance(completion=completion)
-
- def consume_and_terminate(self, value):
- with self._condition:
- while True:
- if self._termination.terminated:
- return
- elif 0 < self._down_allowance:
- completion = self._down_completion()
- self._operator.advance(payload=value, completion=completion)
- return
- else:
- self._condition.wait()
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return self.next()
-
- def next(self):
- with self._condition:
- while True:
- if self._termination.abortion_error is not None:
- raise self._termination.abortion_error
- elif self._up_payload is not None:
- payload = self._up_payload
- self._up_payload = None
- if self._up_completion.kind is _Awaited.Kind.NOT_YET_ARRIVED:
- self._operator.advance(allowance=1)
- return payload
- elif self._up_completion.kind is _Awaited.Kind.ARRIVED:
- raise StopIteration()
- else:
- self._condition.wait()
-
- def is_active(self):
- with self._condition:
- return not self._termination.terminated
-
- def time_remaining(self):
- if self._operation_context is None:
- return 0
- else:
- return self._operation_context.time_remaining()
-
- def add_abortion_callback(self, abortion_callback):
- with self._condition:
- if self._operation_context is None:
- return self._termination.abortion
- else:
- outcome = self._operation_context.add_termination_callback(
- _abortion_callback_as_operation_termination_callback(
- abortion_callback, self.set_outcome))
- if outcome is not None:
- return self._set_outcome(outcome).abortion
- else:
- return self._termination.abortion
-
- def protocol_context(self):
- with self._condition:
- while True:
- if self._protocol_context.kind is _Awaited.Kind.ARRIVED:
- return self._protocol_context.value
- elif self._termination.abortion_error is not None:
- raise self._termination.abortion_error
- else:
- self._condition.wait()
-
- def initial_metadata(self):
- with self._condition:
- while True:
- if self._up_initial_metadata.kind is _Awaited.Kind.ARRIVED:
- return self._up_initial_metadata.value
- elif self._termination.terminated:
- return None
- else:
- self._condition.wait()
-
- def terminal_metadata(self):
- with self._condition:
- while True:
- if self._up_completion.kind is _Awaited.Kind.ARRIVED:
- return self._up_completion.value.terminal_metadata
- elif self._termination.terminated:
- return None
- else:
- self._condition.wait()
-
- def code(self):
- with self._condition:
- while True:
- if self._up_completion.kind is _Awaited.Kind.ARRIVED:
- return self._up_completion.value.code
- elif self._termination.terminated:
- return None
- else:
- self._condition.wait()
-
- def details(self):
- with self._condition:
- while True:
- if self._up_completion.kind is _Awaited.Kind.ARRIVED:
- return self._up_completion.value.message
- elif self._termination.terminated:
- return None
- else:
- self._condition.wait()
-
- def set_initial_metadata(self, initial_metadata):
- with self._condition:
- if (self._down_initial_metadata.kind is not
- _Transitory.Kind.NOT_YET_SEEN):
- raise ValueError(_CANNOT_SET_INITIAL_METADATA)
- else:
- self._down_initial_metadata = _GONE
- self._operator.advance(initial_metadata=initial_metadata)
-
- def set_terminal_metadata(self, terminal_metadata):
- with self._condition:
- if (self._down_terminal_metadata.kind is not
- _Transitory.Kind.NOT_YET_SEEN):
- raise ValueError(_CANNOT_SET_TERMINAL_METADATA)
- else:
- self._down_terminal_metadata = _Transitory(
- _Transitory.Kind.PRESENT, terminal_metadata)
-
- def set_code(self, code):
- with self._condition:
- if self._down_code.kind is not _Transitory.Kind.NOT_YET_SEEN:
- raise ValueError(_CANNOT_SET_CODE)
- else:
- self._down_code = _Transitory(_Transitory.Kind.PRESENT, code)
-
- def set_details(self, details):
- with self._condition:
- if self._down_details.kind is not _Transitory.Kind.NOT_YET_SEEN:
- raise ValueError(_CANNOT_SET_DETAILS)
- else:
- self._down_details = _Transitory(_Transitory.Kind.PRESENT, details)
-
- def set_protocol_context(self, protocol_context):
- with self._condition:
- self._protocol_context = _Awaited(
- _Awaited.Kind.ARRIVED, protocol_context)
- self._condition.notify_all()
-
- def set_outcome(self, outcome):
- with self._condition:
- return self._set_outcome(outcome)
-
-
-class _ProtocolReceiver(base.ProtocolReceiver):
-
- def __init__(self, rendezvous):
- self._rendezvous = rendezvous
-
- def context(self, protocol_context):
- self._rendezvous.set_protocol_context(protocol_context)
-
-
-def protocol_receiver(rendezvous):
- return _ProtocolReceiver(rendezvous)
-
-
-def pool_wrap(behavior, operation_context):
- """Wraps an operation-related behavior so that it may be called in a pool.
-
- Args:
- behavior: A callable related to carrying out an operation.
- operation_context: A base_interfaces.OperationContext for the operation.
-
- Returns:
- A callable that when called carries out the behavior of the given callable
- and handles whatever exceptions it raises appropriately.
- """
- def translation(*args):
- try:
- behavior(*args)
- except (
- abandonment.Abandoned,
- face.CancellationError,
- face.ExpirationError,
- face.LocalShutdownError,
- face.RemoteShutdownError,
- face.NetworkError,
- face.RemoteError,
- ) as e:
- if operation_context.outcome() is None:
- operation_context.fail(e)
- except Exception as e:
- operation_context.fail(e)
- return callable_util.with_exceptions_logged(
- translation, _INTERNAL_ERROR_LOG_MESSAGE)
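For context on the helper being deleted just above: pool_wrap exists so that an exception raised by a behavior running on a pool thread is turned into operation failure rather than silently lost. A standard-library sketch of the same pattern, with illustrative names only (pool_wrap here is a stand-in, and on_failure plays the role of operation_context.fail):

    import logging
    from concurrent import futures

    def pool_wrap(behavior, on_failure):
        # Run `behavior`, reporting any exception through `on_failure`
        # instead of letting it vanish inside the worker thread.
        def translation(*args):
            try:
                behavior(*args)
            except Exception as e:
                logging.exception('operation behavior failed')
                on_failure(e)
        return translation

    pool = futures.ThreadPoolExecutor(max_workers=1)
    pool.submit(pool_wrap(lambda x: 1 / x, print), 0)  # prints the ZeroDivisionError
    pool.shutdown(wait=True)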
diff --git a/src/python/grpcio/grpc/framework/crust/_service.py b/src/python/grpcio/grpc/framework/crust/_service.py
deleted file mode 100644
index 9903415c09..0000000000
--- a/src/python/grpcio/grpc/framework/crust/_service.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Behaviors for servicing RPCs."""
-
-from grpc.framework.crust import _control
-from grpc.framework.foundation import abandonment
-from grpc.framework.interfaces.base import utilities
-from grpc.framework.interfaces.face import face
-
-
-class _ServicerContext(face.ServicerContext):
-
- def __init__(self, rendezvous):
- self._rendezvous = rendezvous
-
- def is_active(self):
- return self._rendezvous.is_active()
-
- def time_remaining(self):
- return self._rendezvous.time_remaining()
-
- def add_abortion_callback(self, abortion_callback):
- return self._rendezvous.add_abortion_callback(abortion_callback)
-
- def cancel(self):
- self._rendezvous.cancel()
-
- def protocol_context(self):
- return self._rendezvous.protocol_context()
-
- def invocation_metadata(self):
- return self._rendezvous.initial_metadata()
-
- def initial_metadata(self, initial_metadata):
- self._rendezvous.set_initial_metadata(initial_metadata)
-
- def terminal_metadata(self, terminal_metadata):
- self._rendezvous.set_terminal_metadata(terminal_metadata)
-
- def code(self, code):
- self._rendezvous.set_code(code)
-
- def details(self, details):
- self._rendezvous.set_details(details)
-
-
-def _adaptation(pool, in_pool):
- def adaptation(operator, operation_context):
- rendezvous = _control.Rendezvous(operator, operation_context)
- subscription = utilities.full_subscription(
- rendezvous, _control.protocol_receiver(rendezvous))
- outcome = operation_context.add_termination_callback(rendezvous.set_outcome)
- if outcome is None:
- pool.submit(_control.pool_wrap(in_pool, operation_context), rendezvous)
- return subscription
- else:
- raise abandonment.Abandoned()
- return adaptation
-
-
-def adapt_inline_unary_unary(method, pool):
- def in_pool(rendezvous):
- request = next(rendezvous)
- response = method(request, _ServicerContext(rendezvous))
- rendezvous.consume_and_terminate(response)
- return _adaptation(pool, in_pool)
-
-
-def adapt_inline_unary_stream(method, pool):
- def in_pool(rendezvous):
- request = next(rendezvous)
- response_iterator = method(request, _ServicerContext(rendezvous))
- for response in response_iterator:
- rendezvous.consume(response)
- rendezvous.terminate()
- return _adaptation(pool, in_pool)
-
-
-def adapt_inline_stream_unary(method, pool):
- def in_pool(rendezvous):
- response = method(rendezvous, _ServicerContext(rendezvous))
- rendezvous.consume_and_terminate(response)
- return _adaptation(pool, in_pool)
-
-
-def adapt_inline_stream_stream(method, pool):
- def in_pool(rendezvous):
- response_iterator = method(rendezvous, _ServicerContext(rendezvous))
- for response in response_iterator:
- rendezvous.consume(response)
- rendezvous.terminate()
- return _adaptation(pool, in_pool)
-
-
-def adapt_event_unary_unary(method, pool):
- def in_pool(rendezvous):
- request = next(rendezvous)
- method(
- request, rendezvous.consume_and_terminate, _ServicerContext(rendezvous))
- return _adaptation(pool, in_pool)
-
-
-def adapt_event_unary_stream(method, pool):
- def in_pool(rendezvous):
- request = next(rendezvous)
- method(request, rendezvous, _ServicerContext(rendezvous))
- return _adaptation(pool, in_pool)
-
-
-def adapt_event_stream_unary(method, pool):
- def in_pool(rendezvous):
- request_consumer = method(
- rendezvous.consume_and_terminate, _ServicerContext(rendezvous))
- for request in rendezvous:
- request_consumer.consume(request)
- request_consumer.terminate()
- return _adaptation(pool, in_pool)
-
-
-def adapt_event_stream_stream(method, pool):
- def in_pool(rendezvous):
- request_consumer = method(rendezvous, _ServicerContext(rendezvous))
- for request in rendezvous:
- request_consumer.consume(request)
- request_consumer.terminate()
- return _adaptation(pool, in_pool)
-
-
-def adapt_multi_method(multi_method, pool):
- def adaptation(group, method, operator, operation_context):
- rendezvous = _control.Rendezvous(operator, operation_context)
- subscription = utilities.full_subscription(
- rendezvous, _control.protocol_receiver(rendezvous))
- outcome = operation_context.add_termination_callback(rendezvous.set_outcome)
- if outcome is None:
- def in_pool():
- request_consumer = multi_method.service(
- group, method, rendezvous, _ServicerContext(rendezvous))
- for request in rendezvous:
- request_consumer.consume(request)
- request_consumer.terminate()
- pool.submit(_control.pool_wrap(in_pool, operation_context), rendezvous)
- return subscription
- else:
- raise abandonment.Abandoned()
- return adaptation
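The adapt_* helpers above all share one shape: pull requests off the rendezvous, call the user-supplied method, and push responses back through consume/terminate before the wrapped callable is handed to the pool. A self-contained illustration of that shape; FakeRendezvous and the lambda are invented for the example and only mimic the parts of _control.Rendezvous the adapter touches:

    class FakeRendezvous(object):
        # Stand-in for _control.Rendezvous: iterable of requests on the way in,
        # consume_and_terminate for responses on the way out.
        def __init__(self, requests):
            self._requests = iter(requests)
            self.responses = []

        def __next__(self):
            return next(self._requests)
        next = __next__

        def consume_and_terminate(self, value):
            self.responses.append(value)
            self.responses.append('<terminated>')

    def adapt_inline_unary_unary(method):
        # Same shape as the removed adapter; called directly here rather than
        # being submitted to a pool via pool_wrap.
        def in_pool(rendezvous):
            request = next(rendezvous)
            response = method(request, None)  # None stands in for a ServicerContext
            rendezvous.consume_and_terminate(response)
        return in_pool

    rendezvous = FakeRendezvous(['hello'])
    adapt_inline_unary_unary(lambda request, context: request.upper())(rendezvous)
    print(rendezvous.responses)  # ['HELLO', '<terminated>']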
diff --git a/src/python/grpcio/grpc/framework/crust/implementations.py b/src/python/grpcio/grpc/framework/crust/implementations.py
deleted file mode 100644
index 2d3ab733b6..0000000000
--- a/src/python/grpcio/grpc/framework/crust/implementations.py
+++ /dev/null
@@ -1,366 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Entry points into the Crust layer of RPC Framework."""
-
-import six
-
-from grpc.framework.common import cardinality
-from grpc.framework.common import style
-from grpc.framework.crust import _calls
-from grpc.framework.crust import _service
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.face import face
-
-
-class _BaseServicer(base.Servicer):
-
- def __init__(self, adapted_methods, adapted_multi_method):
- self._adapted_methods = adapted_methods
- self._adapted_multi_method = adapted_multi_method
-
- def service(self, group, method, context, output_operator):
- adapted_method = self._adapted_methods.get((group, method), None)
- if adapted_method is not None:
- return adapted_method(output_operator, context)
- elif self._adapted_multi_method is not None:
- try:
- return self._adapted_multi_method(
- group, method, output_operator, context)
- except face.NoSuchMethodError:
- raise base.NoSuchMethodError(None, None)
- else:
- raise base.NoSuchMethodError(None, None)
-
-
-class _UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable):
-
- def __init__(self, end, group, method, pool):
- self._end = end
- self._group = group
- self._method = method
- self._pool = pool
-
- def __call__(
- self, request, timeout, metadata=None, with_call=False,
- protocol_options=None):
- return _calls.blocking_unary_unary(
- self._end, self._group, self._method, timeout, with_call,
- protocol_options, metadata, request)
-
- def future(self, request, timeout, metadata=None, protocol_options=None):
- return _calls.future_unary_unary(
- self._end, self._group, self._method, timeout, protocol_options,
- metadata, request)
-
- def event(
- self, request, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- return _calls.event_unary_unary(
- self._end, self._group, self._method, timeout, protocol_options,
- metadata, request, receiver, abortion_callback, self._pool)
-
-
-class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable):
-
- def __init__(self, end, group, method, pool):
- self._end = end
- self._group = group
- self._method = method
- self._pool = pool
-
- def __call__(self, request, timeout, metadata=None, protocol_options=None):
- return _calls.inline_unary_stream(
- self._end, self._group, self._method, timeout, protocol_options,
- metadata, request)
-
- def event(
- self, request, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- return _calls.event_unary_stream(
- self._end, self._group, self._method, timeout, protocol_options,
- metadata, request, receiver, abortion_callback, self._pool)
-
-
-class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable):
-
- def __init__(self, end, group, method, pool):
- self._end = end
- self._group = group
- self._method = method
- self._pool = pool
-
- def __call__(
- self, request_iterator, timeout, metadata=None,
- with_call=False, protocol_options=None):
- return _calls.blocking_stream_unary(
- self._end, self._group, self._method, timeout, with_call,
- protocol_options, metadata, request_iterator, self._pool)
-
- def future(
- self, request_iterator, timeout, metadata=None, protocol_options=None):
- return _calls.future_stream_unary(
- self._end, self._group, self._method, timeout, protocol_options,
- metadata, request_iterator, self._pool)
-
- def event(
- self, receiver, abortion_callback, timeout, metadata=None,
- protocol_options=None):
- return _calls.event_stream_unary(
- self._end, self._group, self._method, timeout, protocol_options,
- metadata, receiver, abortion_callback, self._pool)
-
-
-class _StreamStreamMultiCallable(face.StreamStreamMultiCallable):
-
- def __init__(self, end, group, method, pool):
- self._end = end
- self._group = group
- self._method = method
- self._pool = pool
-
- def __call__(
- self, request_iterator, timeout, metadata=None, protocol_options=None):
- return _calls.inline_stream_stream(
- self._end, self._group, self._method, timeout, protocol_options,
- metadata, request_iterator, self._pool)
-
- def event(
- self, receiver, abortion_callback, timeout, metadata=None,
- protocol_options=None):
- return _calls.event_stream_stream(
- self._end, self._group, self._method, timeout, protocol_options,
- metadata, receiver, abortion_callback, self._pool)
-
-
-class _GenericStub(face.GenericStub):
- """An face.GenericStub implementation."""
-
- def __init__(self, end, pool):
- self._end = end
- self._pool = pool
-
- def blocking_unary_unary(
- self, group, method, request, timeout, metadata=None,
- with_call=None, protocol_options=None):
- return _calls.blocking_unary_unary(
- self._end, group, method, timeout, with_call, protocol_options,
- metadata, request)
-
- def future_unary_unary(
- self, group, method, request, timeout, metadata=None,
- protocol_options=None):
- return _calls.future_unary_unary(
- self._end, group, method, timeout, protocol_options, metadata, request)
-
- def inline_unary_stream(
- self, group, method, request, timeout, metadata=None,
- protocol_options=None):
- return _calls.inline_unary_stream(
- self._end, group, method, timeout, protocol_options, metadata, request)
-
- def blocking_stream_unary(
- self, group, method, request_iterator, timeout, metadata=None,
- with_call=None, protocol_options=None):
- return _calls.blocking_stream_unary(
- self._end, group, method, timeout, with_call, protocol_options,
- metadata, request_iterator, self._pool)
-
- def future_stream_unary(
- self, group, method, request_iterator, timeout, metadata=None,
- protocol_options=None):
- return _calls.future_stream_unary(
- self._end, group, method, timeout, protocol_options, metadata,
- request_iterator, self._pool)
-
- def inline_stream_stream(
- self, group, method, request_iterator, timeout, metadata=None,
- protocol_options=None):
- return _calls.inline_stream_stream(
- self._end, group, method, timeout, protocol_options, metadata,
- request_iterator, self._pool)
-
- def event_unary_unary(
- self, group, method, request, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- return _calls.event_unary_unary(
- self._end, group, method, timeout, protocol_options, metadata, request,
- receiver, abortion_callback, self._pool)
-
- def event_unary_stream(
- self, group, method, request, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- return _calls.event_unary_stream(
- self._end, group, method, timeout, protocol_options, metadata, request,
- receiver, abortion_callback, self._pool)
-
- def event_stream_unary(
- self, group, method, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- return _calls.event_stream_unary(
- self._end, group, method, timeout, protocol_options, metadata, receiver,
- abortion_callback, self._pool)
-
- def event_stream_stream(
- self, group, method, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- return _calls.event_stream_stream(
- self._end, group, method, timeout, protocol_options, metadata, receiver,
- abortion_callback, self._pool)
-
- def unary_unary(self, group, method):
- return _UnaryUnaryMultiCallable(self._end, group, method, self._pool)
-
- def unary_stream(self, group, method):
- return _UnaryStreamMultiCallable(self._end, group, method, self._pool)
-
- def stream_unary(self, group, method):
- return _StreamUnaryMultiCallable(self._end, group, method, self._pool)
-
- def stream_stream(self, group, method):
- return _StreamStreamMultiCallable(self._end, group, method, self._pool)
-
-
-class _DynamicStub(face.DynamicStub):
- """An face.DynamicStub implementation."""
-
- def __init__(self, end, group, cardinalities, pool):
- self._end = end
- self._group = group
- self._cardinalities = cardinalities
- self._pool = pool
-
- def __getattr__(self, attr):
- method_cardinality = self._cardinalities.get(attr)
- if method_cardinality is cardinality.Cardinality.UNARY_UNARY:
- return _UnaryUnaryMultiCallable(self._end, self._group, attr, self._pool)
- elif method_cardinality is cardinality.Cardinality.UNARY_STREAM:
- return _UnaryStreamMultiCallable(self._end, self._group, attr, self._pool)
- elif method_cardinality is cardinality.Cardinality.STREAM_UNARY:
- return _StreamUnaryMultiCallable(self._end, self._group, attr, self._pool)
- elif method_cardinality is cardinality.Cardinality.STREAM_STREAM:
- return _StreamStreamMultiCallable(
- self._end, self._group, attr, self._pool)
- else:
- raise AttributeError('_DynamicStub object has no attribute "%s"!' % attr)
-
-
-def _adapt_method_implementations(method_implementations, pool):
- adapted_implementations = {}
- for name, method_implementation in six.iteritems(method_implementations):
- if method_implementation.style is style.Service.INLINE:
- if method_implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
- adapted_implementations[name] = _service.adapt_inline_unary_unary(
- method_implementation.unary_unary_inline, pool)
- elif method_implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
- adapted_implementations[name] = _service.adapt_inline_unary_stream(
- method_implementation.unary_stream_inline, pool)
- elif method_implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
- adapted_implementations[name] = _service.adapt_inline_stream_unary(
- method_implementation.stream_unary_inline, pool)
- elif method_implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
- adapted_implementations[name] = _service.adapt_inline_stream_stream(
- method_implementation.stream_stream_inline, pool)
- elif method_implementation.style is style.Service.EVENT:
- if method_implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
- adapted_implementations[name] = _service.adapt_event_unary_unary(
- method_implementation.unary_unary_event, pool)
- elif method_implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
- adapted_implementations[name] = _service.adapt_event_unary_stream(
- method_implementation.unary_stream_event, pool)
- elif method_implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
- adapted_implementations[name] = _service.adapt_event_stream_unary(
- method_implementation.stream_unary_event, pool)
- elif method_implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
- adapted_implementations[name] = _service.adapt_event_stream_stream(
- method_implementation.stream_stream_event, pool)
- return adapted_implementations
-
-
-def servicer(method_implementations, multi_method_implementation, pool):
- """Creates a base.Servicer.
-
- It is guaranteed that any passed face.MultiMethodImplementation will
- only be called to service an RPC if there is no
- face.MethodImplementation for the RPC method in the passed
- method_implementations dictionary.
-
- Args:
- method_implementations: A dictionary from RPC method name to
- face.MethodImplementation object to be used to service the named
- RPC method.
- multi_method_implementation: An face.MultiMethodImplementation to be
- used to service any RPCs not serviced by the
- face.MethodImplementations given in the method_implementations
- dictionary, or None.
- pool: A thread pool.
-
- Returns:
- A base.Servicer that services RPCs via the given implementations.
- """
- adapted_implementations = _adapt_method_implementations(
- method_implementations, pool)
- if multi_method_implementation is None:
- adapted_multi_method_implementation = None
- else:
- adapted_multi_method_implementation = _service.adapt_multi_method(
- multi_method_implementation, pool)
- return _BaseServicer(
- adapted_implementations, adapted_multi_method_implementation)
-
-
-def generic_stub(end, pool):
- """Creates an face.GenericStub.
-
- Args:
- end: A base.End.
- pool: A futures.ThreadPoolExecutor.
-
- Returns:
- A face.GenericStub that performs RPCs via the given base.End.
- """
- return _GenericStub(end, pool)
-
-
-def dynamic_stub(end, group, cardinalities, pool):
- """Creates an face.DynamicStub.
-
- Args:
- end: A base.End.
- group: The group identifier for all RPCs to be made with the created
- face.DynamicStub.
- cardinalities: A dict from method identifier to cardinality.Cardinality
- value identifying the cardinality of every RPC method to be supported by
- the created face.DynamicStub.
- pool: A futures.ThreadPoolExecutor.
-
- Returns:
- A face.DynamicStub that performs RPCs via the given base.End.
- """
- return _DynamicStub(end, group, cardinalities, pool)
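_DynamicStub above turns attribute access into a per-method cardinality lookup that selects which multi-callable to build. The same idea in a standalone sketch; Cardinality, DynamicStub, and SayHello are invented for illustration and do not reuse the framework types:

    import enum

    class Cardinality(enum.Enum):
        UNARY_UNARY = 'unary-unary'
        UNARY_STREAM = 'unary-stream'

    class DynamicStub(object):
        # Attribute access becomes a lookup of the method's cardinality,
        # which selects the callable factory to use.
        def __init__(self, cardinalities, factories):
            self._cardinalities = cardinalities
            self._factories = factories

        def __getattr__(self, attr):
            method_cardinality = self._cardinalities.get(attr)
            if method_cardinality is None:
                raise AttributeError(attr)
            return self._factories[method_cardinality](attr)

    stub = DynamicStub(
        {'SayHello': Cardinality.UNARY_UNARY},
        {Cardinality.UNARY_UNARY:
             lambda method: (lambda request: '%s(%s)' % (method, request))})
    print(stub.SayHello('world'))  # SayHello(world)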
diff --git a/src/python/grpcio/grpc/framework/foundation/_timer_future.py b/src/python/grpcio/grpc/framework/foundation/_timer_future.py
deleted file mode 100644
index 2c9996aa9d..0000000000
--- a/src/python/grpcio/grpc/framework/foundation/_timer_future.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Affords a Future implementation based on Python's threading.Timer."""
-
-import sys
-import threading
-import time
-
-from grpc.framework.foundation import future
-
-
-class TimerFuture(future.Future):
- """A Future implementation based around Timer objects."""
-
- def __init__(self, compute_time, computation):
- """Constructor.
-
- Args:
- compute_time: The time after which to begin this future's computation.
- computation: The computation to be performed within this Future.
- """
- self._lock = threading.Lock()
- self._compute_time = compute_time
- self._computation = computation
- self._timer = None
- self._computing = False
- self._computed = False
- self._cancelled = False
- self._result = None
- self._exception = None
- self._traceback = None
- self._waiting = []
-
- def _compute(self):
- """Performs the computation embedded in this Future.
-
- Or doesn't, if the time to perform it has not yet arrived.
- """
- with self._lock:
- time_remaining = self._compute_time - time.time()
- if 0 < time_remaining:
- self._timer = threading.Timer(time_remaining, self._compute)
- self._timer.start()
- return
- else:
- self._computing = True
-
- try:
- return_value = self._computation()
- exception = None
- traceback = None
- except Exception as e: # pylint: disable=broad-except
- return_value = None
- exception = e
- traceback = sys.exc_info()[2]
-
- with self._lock:
- self._computing = False
- self._computed = True
- self._return_value = return_value
- self._exception = exception
- self._traceback = traceback
- waiting = self._waiting
-
- for callback in waiting:
- callback(self)
-
- def start(self):
- """Starts this Future.
-
- This must be called exactly once, immediately after construction.
- """
- with self._lock:
- self._timer = threading.Timer(
- self._compute_time - time.time(), self._compute)
- self._timer.start()
-
- def cancel(self):
- """See future.Future.cancel for specification."""
- with self._lock:
- if self._computing or self._computed:
- return False
- elif self._cancelled:
- return True
- else:
- self._timer.cancel()
- self._cancelled = True
- waiting = self._waiting
-
- for callback in waiting:
- try:
- callback(self)
- except Exception: # pylint: disable=broad-except
- pass
-
- return True
-
- def cancelled(self):
- """See future.Future.cancelled for specification."""
- with self._lock:
- return self._cancelled
-
- def running(self):
- """See future.Future.running for specification."""
- with self._lock:
- return not self._computed and not self._cancelled
-
- def done(self):
- """See future.Future.done for specification."""
- with self._lock:
- return self._computed or self._cancelled
-
- def result(self, timeout=None):
- """See future.Future.result for specification."""
- with self._lock:
- if self._cancelled:
- raise future.CancelledError()
- elif self._computed:
- if self._exception is None:
- return self._return_value
- else:
- raise self._exception # pylint: disable=raising-bad-type
-
- condition = threading.Condition()
- def notify_condition(unused_future):
- with condition:
- condition.notify()
- self._waiting.append(notify_condition)
-
- with condition:
- condition.wait(timeout=timeout)
-
- with self._lock:
- if self._cancelled:
- raise future.CancelledError()
- elif self._computed:
- if self._exception is None:
- return self._return_value
- else:
- raise self._exception # pylint: disable=raising-bad-type
- else:
- raise future.TimeoutError()
-
- def exception(self, timeout=None):
- """See future.Future.exception for specification."""
- with self._lock:
- if self._cancelled:
- raise future.CancelledError()
- elif self._computed:
- return self._exception
-
- condition = threading.Condition()
- def notify_condition(unused_future):
- with condition:
- condition.notify()
- self._waiting.append(notify_condition)
-
- with condition:
- condition.wait(timeout=timeout)
-
- with self._lock:
- if self._cancelled:
- raise future.CancelledError()
- elif self._computed:
- return self._exception
- else:
- raise future.TimeoutError()
-
- def traceback(self, timeout=None):
- """See future.Future.traceback for specification."""
- with self._lock:
- if self._cancelled:
- raise future.CancelledError()
- elif self._computed:
- return self._traceback
-
- condition = threading.Condition()
- def notify_condition(unused_future):
- with condition:
- condition.notify()
- self._waiting.append(notify_condition)
-
- with condition:
- condition.wait(timeout=timeout)
-
- with self._lock:
- if self._cancelled:
- raise future.CancelledError()
- elif self._computed:
- return self._traceback
- else:
- raise future.TimeoutError()
-
- def add_done_callback(self, fn):
- """See future.Future.add_done_callback for specification."""
- with self._lock:
- if not self._computed and not self._cancelled:
- self._waiting.append(fn)
- return
-
- fn(self)
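TimerFuture's start/compute/cancel life cycle can be approximated with the standard library; a sketch under that reading (timer_future is an illustrative name, not the removed API):

    import threading
    from concurrent import futures

    def timer_future(delay, computation):
        # Run `computation` after `delay` seconds and expose the outcome
        # through a standard concurrent.futures.Future.
        outcome = futures.Future()

        def run():
            if outcome.set_running_or_notify_cancel():
                try:
                    outcome.set_result(computation())
                except Exception as e:  # same result/exception split as TimerFuture
                    outcome.set_exception(e)

        threading.Timer(delay, run).start()
        return outcome

    f = timer_future(0.1, lambda: 6 * 7)
    print(f.result(timeout=1))  # 42
    print(f.cancel())           # False: the computation already ran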
diff --git a/src/python/grpcio/grpc/framework/foundation/activated.py b/src/python/grpcio/grpc/framework/foundation/activated.py
deleted file mode 100644
index 8b8e4f45b5..0000000000
--- a/src/python/grpcio/grpc/framework/foundation/activated.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Interfaces related to streams of values or objects."""
-
-import abc
-
-import six
-
-class Activated(six.with_metaclass(abc.ABCMeta)):
- """Interface for objects that may be started and stopped.
-
- Values implementing this type must also implement the context manager
- protocol.
- """
-
- @abc.abstractmethod
- def __enter__(self):
- """See the context manager protocol for specification."""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def __exit__(self, exc_type, exc_val, exc_tb):
- """See the context manager protocol for specification."""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def start(self):
- """Activates this object.
-
- Returns:
- A value equal to the value returned by this object's __enter__ method.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def stop(self):
- """Deactivates this object."""
- raise NotImplementedError()
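A concrete Activated-style object mainly needs __enter__ to return whatever start() returns and __exit__ to delegate to stop(). A standalone sketch; Stopwatch is invented for the example and does not subclass the removed ABC:

    import time

    class Stopwatch(object):
        # Follows the Activated contract: start/stop plus the context manager
        # protocol, with __enter__ returning the same value as start().
        def __init__(self):
            self._started_at = None
            self.elapsed = None

        def start(self):
            self._started_at = time.time()
            return self

        def stop(self):
            self.elapsed = time.time() - self._started_at

        def __enter__(self):
            return self.start()

        def __exit__(self, exc_type, exc_val, exc_tb):
            self.stop()
            return False

    with Stopwatch() as watch:
        time.sleep(0.01)
    print(watch.elapsed > 0)  # True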
diff --git a/src/python/grpcio/grpc/framework/foundation/later.py b/src/python/grpcio/grpc/framework/foundation/later.py
deleted file mode 100644
index 1d1e065041..0000000000
--- a/src/python/grpcio/grpc/framework/foundation/later.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Enables scheduling execution at a later time."""
-
-import time
-
-from grpc.framework.foundation import _timer_future
-
-
-def later(delay, computation):
- """Schedules later execution of a callable.
-
- Args:
- delay: Any numeric value. Represents the minimum length of time in seconds
- to allow to pass before beginning the computation. No guarantees are made
- about the maximum length of time that will pass.
- computation: A callable that accepts no arguments.
-
- Returns:
- A Future representing the scheduled computation.
- """
- timer_future = _timer_future.TimerFuture(time.time() + delay, computation)
- timer_future.start()
- return timer_future
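Before removal, a caller used this module roughly as below. The sketch assumes a grpcio build old enough to still ship grpc.framework.foundation.later:

    # Assumes the pre-removal grpcio package that still provides
    # grpc.framework.foundation.later.
    from grpc.framework.foundation import later

    future = later.later(0.5, lambda: 'done')
    print(future.result())  # blocks roughly half a second, then prints 'done'
    print(future.done())    # True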
diff --git a/src/python/grpcio/grpc/framework/foundation/relay.py b/src/python/grpcio/grpc/framework/foundation/relay.py
deleted file mode 100644
index 20f41b2738..0000000000
--- a/src/python/grpcio/grpc/framework/foundation/relay.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Implementations of in-order work deference."""
-
-import abc
-import enum
-import threading
-
-from grpc.framework.foundation import activated
-from grpc.framework.foundation import logging_pool
-
-_NULL_BEHAVIOR = lambda unused_value: None
-
-
-class Relay(object):
- """Performs work submitted to it in another thread.
-
- Performs work in the order in which work was submitted to it; otherwise there
- would be no reason to use an implementation of this interface instead of a
- thread pool.
- """
-
- @abc.abstractmethod
- def add_value(self, value):
- """Adds a value to be passed to the behavior registered with this Relay.
-
- Args:
- value: A value that will be passed to a call made in another thread to the
- behavior registered with this Relay.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def set_behavior(self, behavior):
- """Sets the behavior that this Relay should call when passed values.
-
- Args:
- behavior: The behavior that this Relay should call in another thread when
- passed a value, or None to have passed values ignored.
- """
- raise NotImplementedError()
-
-
-class _PoolRelay(activated.Activated, Relay):
-
- @enum.unique
- class _State(enum.Enum):
- INACTIVE = 'inactive'
- IDLE = 'idle'
- SPINNING = 'spinning'
-
- def __init__(self, pool, behavior):
- self._condition = threading.Condition()
- self._pool = pool
- self._own_pool = pool is None
- self._state = _PoolRelay._State.INACTIVE
- self._activated = False
- self._spinning = False
- self._values = []
- self._behavior = _NULL_BEHAVIOR if behavior is None else behavior
-
- def _spin(self, behavior, value):
- while True:
- behavior(value)
- with self._condition:
- if self._values:
- value = self._values.pop(0)
- behavior = self._behavior
- else:
- self._state = _PoolRelay._State.IDLE
- self._condition.notify_all()
- break
-
- def add_value(self, value):
- with self._condition:
- if self._state is _PoolRelay._State.INACTIVE:
- raise ValueError('add_value not valid on inactive Relay!')
- elif self._state is _PoolRelay._State.IDLE:
- self._pool.submit(self._spin, self._behavior, value)
- self._state = _PoolRelay._State.SPINNING
- else:
- self._values.append(value)
-
- def set_behavior(self, behavior):
- with self._condition:
- self._behavior = _NULL_BEHAVIOR if behavior is None else behavior
-
- def _start(self):
- with self._condition:
- self._state = _PoolRelay._State.IDLE
- if self._own_pool:
- self._pool = logging_pool.pool(1)
- return self
-
- def _stop(self):
- with self._condition:
- while self._state is _PoolRelay._State.SPINNING:
- self._condition.wait()
- if self._own_pool:
- self._pool.shutdown(wait=True)
- self._state = _PoolRelay._State.INACTIVE
-
- def __enter__(self):
- return self._start()
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self._stop()
- return False
-
- def start(self):
- return self._start()
-
- def stop(self):
- self._stop()
-
-
-def relay(behavior):
- """Creates a Relay.
-
- Args:
- behavior: The behavior to be called by the created Relay, or None to have
- passed values dropped until a different behavior is given to the returned
- Relay later.
-
- Returns:
- An object that is both an activated.Activated and a Relay. The object is
- only valid for use as a Relay when activated.
- """
- return _PoolRelay(None, behavior)
-
-
-def pool_relay(pool, behavior):
- """Creates a Relay that uses a given thread pool.
-
- This object will make use of at most one thread in the given pool.
-
- Args:
- pool: A futures.ThreadPoolExecutor for use by the created Relay.
- behavior: The behavior to be called by the created Relay, or None to have
- passed values dropped until a different behavior is given to the returned
- Relay later.
-
- Returns:
- An object that is both an activated.Activated and a Relay. The object is
- only valid for use as a Relay when activated.
- """
- return _PoolRelay(pool, behavior)
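The ordering guarantee a Relay provides can be approximated with a one-thread executor, which is effectively the single pool slot _PoolRelay reserves. A standalone sketch; SingleThreadRelay is invented for the example and is not the removed class:

    from concurrent import futures

    class SingleThreadRelay(object):
        # Values are handed to `behavior` on one worker thread, in submission
        # order, which is the property Relay exists to guarantee.
        def __init__(self, behavior):
            self._behavior = behavior
            self._pool = futures.ThreadPoolExecutor(max_workers=1)

        def add_value(self, value):
            self._pool.submit(self._behavior, value)

        def stop(self):
            self._pool.shutdown(wait=True)

    seen = []
    relay = SingleThreadRelay(seen.append)
    for value in range(5):
        relay.add_value(value)
    relay.stop()
    print(seen)  # [0, 1, 2, 3, 4]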
diff --git a/src/python/grpcio/grpc/framework/interfaces/links/__init__.py b/src/python/grpcio/grpc/framework/interfaces/links/__init__.py
deleted file mode 100644
index 7086519106..0000000000
--- a/src/python/grpcio/grpc/framework/interfaces/links/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio/grpc/framework/interfaces/links/links.py b/src/python/grpcio/grpc/framework/interfaces/links/links.py
deleted file mode 100644
index 9631b19078..0000000000
--- a/src/python/grpcio/grpc/framework/interfaces/links/links.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""The low-level ticket-exchanging-links interface of RPC Framework."""
-
-import abc
-import collections
-import enum
-
-import six
-
-
-class Protocol(collections.namedtuple('Protocol', ('kind', 'value',))):
- """A sum type for handles to a system that transmits tickets.
-
- Attributes:
- kind: A Kind value identifying the kind of value being passed.
- value: The value being passed between the high-level application and the
- system affording ticket transport.
- """
-
- @enum.unique
- class Kind(enum.Enum):
- CALL_OPTION = 'call option'
- SERVICER_CONTEXT = 'servicer context'
- INVOCATION_CONTEXT = 'invocation context'
-
-
-class Ticket(
- collections.namedtuple(
- 'Ticket',
- ('operation_id', 'sequence_number', 'group', 'method', 'subscription',
- 'timeout', 'allowance', 'initial_metadata', 'payload',
- 'terminal_metadata', 'code', 'message', 'termination', 'protocol',))):
- """A sum type for all values sent from a front to a back.
-
- Attributes:
- operation_id: A unique-with-respect-to-equality hashable object identifying
- a particular operation.
- sequence_number: A zero-indexed integer sequence number identifying the
- ticket's place in the stream of tickets sent in one direction for the
- particular operation.
- group: The group to which the method of the operation belongs. Must be
- present in the first ticket from invocation side to service side. Ignored
- for all other tickets exchanged during the operation.
- method: The name of an operation. Must be present in the first ticket from
- invocation side to service side. Ignored for all other tickets exchanged
- during the operation.
- subscription: A Subscription value describing the interest one side has in
- receiving information from the other side. Must be present in the first
- ticket from either side. Ignored for all other tickets exchanged during
- the operation.
- timeout: A nonzero length of time (measured from the beginning of the
- operation) to allow for the entire operation. Must be present in the first
- ticket from invocation side to service side. Optional for all other
- tickets exchanged during the operation. Receipt of a value from the other
- side of the operation indicates the value in use by that side. Setting a
- value on a later ticket allows either side to request time extensions (or
- even time reductions!) on in-progress operations.
- allowance: A positive integer granting permission for a number of payloads
- to be transmitted to the communicating side of the operation, or None if
- no additional allowance is being granted with this ticket.
- initial_metadata: An optional metadata value communicated from one side to
- the other at the beginning of the operation. May be non-None in at most
- one ticket from each side. Any non-None value must appear no later than
- the first payload value.
- payload: A customer payload object. May be None.
- terminal_metadata: A metadata value comminicated from one side to the other
- at the end of the operation. May be non-None in the same ticket as
- the code and message, but must be None for all earlier tickets.
- code: A value communicated at operation completion. May be None.
- message: A value communicated at operation completion. May be None.
- termination: A Termination value describing the end of the operation, or
- None if the operation has not yet terminated. If set, no further tickets
- may be sent in the same direction.
- protocol: A Protocol value or None, with further semantics being a matter
- between high-level application and underlying ticket transport.
- """
-
- @enum.unique
- class Subscription(enum.Enum):
- """Identifies the level of subscription of a side of an operation."""
-
- NONE = 'none'
- TERMINATION = 'termination'
- FULL = 'full'
-
- @enum.unique
- class Termination(enum.Enum):
- """Identifies the termination of an operation."""
-
- COMPLETION = 'completion'
- CANCELLATION = 'cancellation'
- EXPIRATION = 'expiration'
- SHUTDOWN = 'shutdown'
- RECEPTION_FAILURE = 'reception failure'
- TRANSMISSION_FAILURE = 'transmission failure'
- LOCAL_FAILURE = 'local failure'
- REMOTE_FAILURE = 'remote failure'
-
-
-class Link(six.with_metaclass(abc.ABCMeta)):
- """Accepts and emits tickets."""
-
- @abc.abstractmethod
- def accept_ticket(self, ticket):
- """Accept a Ticket.
-
- Args:
- ticket: Any Ticket.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def join_link(self, link):
- """Mates this object with a peer with which it will exchange tickets."""
- raise NotImplementedError()
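Two Links are mated with join_link and then exchange Tickets through accept_ticket. A minimal recording Link makes that contract concrete; RecordingLink is invented for the example and passes a plain dict where the framework would pass a Ticket:

    class RecordingLink(object):
        # Minimal Link: remembers every ticket it accepts and knows its peer.
        def __init__(self):
            self._peer = None
            self.tickets = []

        def accept_ticket(self, ticket):
            self.tickets.append(ticket)

        def join_link(self, link):
            self._peer = link

    front, back = RecordingLink(), RecordingLink()
    front.join_link(back)
    back.join_link(front)
    back.accept_ticket({'operation_id': 1, 'sequence_number': 0})
    print(back.tickets)  # [{'operation_id': 1, 'sequence_number': 0}]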
diff --git a/src/python/grpcio/grpc/framework/interfaces/links/utilities.py b/src/python/grpcio/grpc/framework/interfaces/links/utilities.py
deleted file mode 100644
index 6e4fd76d93..0000000000
--- a/src/python/grpcio/grpc/framework/interfaces/links/utilities.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Utilities provided as part of the links interface."""
-
-from grpc.framework.interfaces.links import links
-
-
-class _NullLink(links.Link):
- """A do-nothing links.Link."""
-
- def accept_ticket(self, ticket):
- pass
-
- def join_link(self, link):
- pass
-
-NULL_LINK = _NullLink()
diff --git a/src/python/grpcio/grpc_version.py b/src/python/grpcio/grpc_version.py
index 0f4db9d972..ea38526a28 100644
--- a/src/python/grpcio/grpc_version.py
+++ b/src/python/grpcio/grpc_version.py
@@ -29,4 +29,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_version.py.template`!!!
-VERSION='0.16.0.dev0'
+VERSION='1.1.0.dev0'
diff --git a/src/python/grpcio_tests/grpc_version.py b/src/python/grpcio_tests/grpc_version.py
index 7aa600728a..90f68a5741 100644
--- a/src/python/grpcio_tests/grpc_version.py
+++ b/src/python/grpcio_tests/grpc_version.py
@@ -29,4 +29,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_tests/grpc_version.py.template`!!!
-VERSION='0.16.0.dev0'
+VERSION='1.1.0.dev0'
diff --git a/src/python/grpcio_tests/tests/interop/methods.py b/src/python/grpcio_tests/tests/interop/methods.py
index 86aa0495a2..97e6c9e27e 100644
--- a/src/python/grpcio_tests/tests/interop/methods.py
+++ b/src/python/grpcio_tests/tests/interop/methods.py
@@ -39,6 +39,7 @@ import time
from oauth2client import client as oauth2client_client
+import grpc
from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.common import cardinality
@@ -57,12 +58,18 @@ class TestService(test_pb2.BetaTestServiceServicer):
return empty_pb2.Empty()
def UnaryCall(self, request, context):
+ if request.HasField('response_status'):
+ context.code(request.response_status.code)
+ context.details(request.response_status.message)
return messages_pb2.SimpleResponse(
payload=messages_pb2.Payload(
type=messages_pb2.COMPRESSABLE,
body=b'\x00' * request.response_size))
def StreamingOutputCall(self, request, context):
+ if request.HasField('response_status'):
+ context.code(request.response_status.code)
+ context.details(request.response_status.message)
for response_parameters in request.response_parameters:
yield messages_pb2.StreamingOutputCallResponse(
payload=messages_pb2.Payload(
@@ -79,6 +86,9 @@ class TestService(test_pb2.BetaTestServiceServicer):
def FullDuplexCall(self, request_iterator, context):
for request in request_iterator:
+ if request.HasField('response_status'):
+ context.code(request.response_status.code)
+ context.details(request.response_status.message)
for response_parameters in request.response_parameters:
yield messages_pb2.StreamingOutputCallResponse(
payload=messages_pb2.Payload(
@@ -289,6 +299,39 @@ def _empty_stream(stub):
pass
+def _status_code_and_message(stub):
+ with stub:
+ message = 'test status message'
+ code = 2
+ status = grpc.StatusCode.UNKNOWN # code = 2
+ request = messages_pb2.SimpleRequest(
+ response_type=messages_pb2.COMPRESSABLE,
+ response_size=1,
+ payload=messages_pb2.Payload(body=b'\x00'),
+ response_status=messages_pb2.EchoStatus(code=code, message=message)
+ )
+ response_future = stub.UnaryCall.future(request, _TIMEOUT)
+ if response_future.code() != status:
+ raise ValueError(
+ 'expected code %s, got %s' % (status, response_future.code()))
+ if response_future.details() != message:
+ raise ValueError(
+ 'expected message %s, got %s' % (message, response_future.details()))
+
+ request = messages_pb2.StreamingOutputCallRequest(
+ response_type=messages_pb2.COMPRESSABLE,
+ response_parameters=(
+ messages_pb2.ResponseParameters(size=1),),
+ response_status=messages_pb2.EchoStatus(code=code, message=message))
+ response_iterator = stub.StreamingOutputCall(request, _TIMEOUT)
+ if response_iterator.code() != status:
+ raise ValueError(
+ 'expected code %s, got %s' % (status, response_iterator.code()))
+ if response_iterator.details() != message:
+ raise ValueError(
+ 'expected message %s, got %s' % (message, response_iterator.details()))
+
+
def _compute_engine_creds(stub, args):
response = _large_unary_common_behavior(stub, True, True)
if args.default_service_account != response.username:
@@ -347,6 +390,7 @@ class TestCase(enum.Enum):
CANCEL_AFTER_BEGIN = 'cancel_after_begin'
CANCEL_AFTER_FIRST_RESPONSE = 'cancel_after_first_response'
EMPTY_STREAM = 'empty_stream'
+ STATUS_CODE_AND_MESSAGE = 'status_code_and_message'
COMPUTE_ENGINE_CREDS = 'compute_engine_creds'
OAUTH2_AUTH_TOKEN = 'oauth2_auth_token'
JWT_TOKEN_CREDS = 'jwt_token_creds'
@@ -372,6 +416,8 @@ class TestCase(enum.Enum):
_timeout_on_sleeping_server(stub)
elif self is TestCase.EMPTY_STREAM:
_empty_stream(stub)
+ elif self is TestCase.STATUS_CODE_AND_MESSAGE:
+ _status_code_and_message(stub)
elif self is TestCase.COMPUTE_ENGINE_CREDS:
_compute_engine_creds(stub, args)
elif self is TestCase.OAUTH2_AUTH_TOKEN:
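The new test leans on grpc.StatusCode.UNKNOWN corresponding to the integer 2 carried in the EchoStatus proto. That correspondence can be checked with a small reverse lookup; this is a sketch assuming an installed grpcio, not part of the change above:

    import grpc

    # Each grpc.StatusCode member's value is a (numeric code, name) pair, so a
    # reverse lookup maps the integers used in the interop protos onto members.
    _INT_TO_STATUS_CODE = {status.value[0]: status for status in grpc.StatusCode}

    print(_INT_TO_STATUS_CODE[2])                 # StatusCode.UNKNOWN
    print(grpc.StatusCode.UNKNOWN.value[0] == 2)  # True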
diff --git a/src/python/grpcio_tests/tests/qps/qps_worker.py b/src/python/grpcio_tests/tests/qps/qps_worker.py
index 3abf0d08dd..2371ff0956 100644
--- a/src/python/grpcio_tests/tests/qps/qps_worker.py
+++ b/src/python/grpcio_tests/tests/qps/qps_worker.py
@@ -40,7 +40,7 @@ from tests.qps import worker_server
def run_worker_server(port):
- server = grpc.server((), futures.ThreadPoolExecutor(max_workers=5))
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers=5))
servicer = worker_server.WorkerServer()
services_pb2.add_WorkerServiceServicer_to_server(servicer, server)
server.add_insecure_port('[::]:{}'.format(port))
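This hunk, like the worker_server.py and _exit_scenarios.py hunks below, drops the leading empty tuple these call sites used to pass to grpc.server, leaving only the thread pool. A minimal sketch of the resulting single-argument form, assuming the GA grpcio API:

    from concurrent import futures

    import grpc

    # The server now takes only the executor; handlers are registered afterwards.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=5))
    port = server.add_insecure_port('[::]:0')  # bind an ephemeral port
    server.start()
    # ... serve traffic ...
    server.stop(None)  # shut down immediately (no grace period)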
diff --git a/src/python/grpcio_tests/tests/qps/worker_server.py b/src/python/grpcio_tests/tests/qps/worker_server.py
index 932a1ffe2b..46d542940f 100644
--- a/src/python/grpcio_tests/tests/qps/worker_server.py
+++ b/src/python/grpcio_tests/tests/qps/worker_server.py
@@ -82,7 +82,7 @@ class WorkerServer(services_pb2.WorkerServiceServicer):
server_threads = multiprocessing.cpu_count() * 5
else:
server_threads = config.async_server_threads
- server = grpc.server((), futures.ThreadPoolExecutor(
+ server = grpc.server(futures.ThreadPoolExecutor(
max_workers=server_threads))
if config.server_type == control_pb2.ASYNC_SERVER:
servicer = benchmark_server.BenchmarkServer()
diff --git a/src/python/grpcio_tests/tests/unit/_adapter/.gitignore b/src/python/grpcio_tests/tests/unit/_adapter/.gitignore
deleted file mode 100644
index a6f96cd6db..0000000000
--- a/src/python/grpcio_tests/tests/unit/_adapter/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*.a
-*.so
-*.dll
-*.pyc
-*.pyd
diff --git a/src/python/grpcio_tests/tests/unit/_adapter/__init__.py b/src/python/grpcio_tests/tests/unit/_adapter/__init__.py
deleted file mode 100644
index 7086519106..0000000000
--- a/src/python/grpcio_tests/tests/unit/_adapter/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/_adapter/_proto_scenarios.py b/src/python/grpcio_tests/tests/unit/_adapter/_proto_scenarios.py
deleted file mode 100644
index 7a90eacf77..0000000000
--- a/src/python/grpcio_tests/tests/unit/_adapter/_proto_scenarios.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Test scenarios using protocol buffers."""
-
-import abc
-import threading
-
-import six
-
-from tests.unit._junkdrawer import math_pb2
-
-
-class ProtoScenario(six.with_metaclass(abc.ABCMeta)):
- """An RPC test scenario using protocol buffers."""
-
- @abc.abstractmethod
- def method(self):
- """Access the test method name.
-
- Returns:
- The test method name.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def serialize_request(self, request):
- """Serialize a request protocol buffer.
-
- Args:
- request: A request protocol buffer.
-
- Returns:
- The bytestring serialization of the given request protocol buffer.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def deserialize_request(self, request_bytestring):
- """Deserialize a request protocol buffer.
-
- Args:
- request_bytestring: The bytestring serialization of a request protocol
- buffer.
-
- Returns:
- The request protocol buffer deserialized from the given byte string.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def serialize_response(self, response):
- """Serialize a response protocol buffer.
-
- Args:
- response: A response protocol buffer.
-
- Returns:
- The bytestring serialization of the given response protocol buffer.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def deserialize_response(self, response_bytestring):
- """Deserialize a response protocol buffer.
-
- Args:
- response_bytestring: The bytestring serialization of a response protocol
- buffer.
-
- Returns:
- The response protocol buffer deserialized from the given byte string.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def requests(self):
- """Access the sequence of requests for this scenario.
-
- Returns:
- A sequence of request protocol buffers.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def response_for_request(self, request):
- """Access the response for a particular request.
-
- Args:
- request: A request protocol buffer.
-
- Returns:
- The response protocol buffer appropriate for the given request.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def verify_requests(self, experimental_requests):
- """Verify the requests transmitted through the system under test.
-
- Args:
- experimental_requests: The request protocol buffers transmitted through
- the system under test.
-
- Returns:
- True if the requests satisfy this test scenario; False otherwise.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def verify_responses(self, experimental_responses):
- """Verify the responses transmitted through the system under test.
-
- Args:
- experimental_responses: The response protocol buffers transmitted through
- the system under test.
-
- Returns:
- True if the responses satisfy this test scenario; False otherwise.
- """
- raise NotImplementedError()
-
-
-class EmptyScenario(ProtoScenario):
- """A scenario that transmits no protocol buffers in either direction."""
-
- def method(self):
- return 'DivMany'
-
- def serialize_request(self, request):
- raise ValueError('This should not be necessary to call!')
-
- def deserialize_request(self, request_bytestring):
- raise ValueError('This should not be necessary to call!')
-
- def serialize_response(self, response):
- raise ValueError('This should not be necessary to call!')
-
- def deserialize_response(self, response_bytestring):
- raise ValueError('This should not be necessary to call!')
-
- def requests(self):
- return ()
-
- def response_for_request(self, request):
- raise ValueError('This should not be necessary to call!')
-
- def verify_requests(self, experimental_requests):
- return not experimental_requests
-
- def verify_responses(self, experimental_responses):
- return not experimental_responses
-
-
-class BidirectionallyUnaryScenario(ProtoScenario):
- """A scenario that transmits no protocol buffers in either direction."""
-
- _DIVIDEND = 59
- _DIVISOR = 7
- _QUOTIENT = 8
- _REMAINDER = 3
-
- _REQUEST = math_pb2.DivArgs(dividend=_DIVIDEND, divisor=_DIVISOR)
- _RESPONSE = math_pb2.DivReply(quotient=_QUOTIENT, remainder=_REMAINDER)
-
- def method(self):
- return 'Div'
-
- def serialize_request(self, request):
- return request.SerializeToString()
-
- def deserialize_request(self, request_bytestring):
- return math_pb2.DivArgs.FromString(request_bytestring)
-
- def serialize_response(self, response):
- return response.SerializeToString()
-
- def deserialize_response(self, response_bytestring):
- return math_pb2.DivReply.FromString(response_bytestring)
-
- def requests(self):
- return [self._REQUEST]
-
- def response_for_request(self, request):
- return self._RESPONSE
-
- def verify_requests(self, experimental_requests):
- return tuple(experimental_requests) == (self._REQUEST,)
-
- def verify_responses(self, experimental_responses):
- return tuple(experimental_responses) == (self._RESPONSE,)
-
-
-class BidirectionallyStreamingScenario(ProtoScenario):
- """A scenario that transmits no protocol buffers in either direction."""
-
- _STREAM_LENGTH = 200
- _REQUESTS = tuple(
- math_pb2.DivArgs(dividend=59 + index, divisor=7 + index)
- for index in range(_STREAM_LENGTH))
-
- def __init__(self):
- self._lock = threading.Lock()
- self._responses = []
-
- def method(self):
- return 'DivMany'
-
- def serialize_request(self, request):
- return request.SerializeToString()
-
- def deserialize_request(self, request_bytestring):
- return math_pb2.DivArgs.FromString(request_bytestring)
-
- def serialize_response(self, response):
- return response.SerializeToString()
-
- def deserialize_response(self, response_bytestring):
- return math_pb2.DivReply.FromString(response_bytestring)
-
- def requests(self):
- return self._REQUESTS
-
- def response_for_request(self, request):
- quotient, remainder = divmod(request.dividend, request.divisor)
- response = math_pb2.DivReply(quotient=quotient, remainder=remainder)
- with self._lock:
- self._responses.append(response)
- return response
-
- def verify_requests(self, experimental_requests):
- return tuple(experimental_requests) == self._REQUESTS
-
- def verify_responses(self, experimental_responses):
- with self._lock:
- return tuple(experimental_responses) == tuple(self._responses)
diff --git a/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
index 9d1dbc189b..f9a8e2401b 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
@@ -281,8 +281,8 @@ class ServerClientMixin(object):
], server_call_tag)
self.assertEqual(cygrpc.CallError.ok, server_start_batch_result)
- client_event = client_event_future.result()
server_event = self.server_completion_queue.poll(cygrpc_deadline)
+ client_event = client_event_future.result()
self.assertEqual(6, len(client_event.batch_operations))
found_client_op_types = set()
diff --git a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
index 24a2faef85..b33802bf57 100644
--- a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
+++ b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
@@ -184,11 +184,11 @@ if __name__ == '__main__':
args = parser.parse_args()
if args.scenario == UNSTARTED_SERVER:
- server = grpc.server((), DaemonPool())
+ server = grpc.server(DaemonPool())
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
elif args.scenario == RUNNING_SERVER:
- server = grpc.server((), DaemonPool())
+ server = grpc.server(DaemonPool())
port = server.add_insecure_port('[::]:0')
server.start()
if args.wait_for_interrupt:
@@ -203,7 +203,7 @@ if __name__ == '__main__':
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
elif args.scenario == POLL_CONNECTIVITY:
- server = grpc.server((), DaemonPool())
+ server = grpc.server(DaemonPool())
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
@@ -217,7 +217,7 @@ if __name__ == '__main__':
else:
handler = GenericHandler()
- server = grpc.server((), DaemonPool())
+ server = grpc.server(DaemonPool())
port = server.add_insecure_port('[::]:0')
server.add_generic_rpc_handlers((handler,))
server.start()
diff --git a/src/python/grpcio_tests/tests/unit/_exit_test.py b/src/python/grpcio_tests/tests/unit/_exit_test.py
index b0d6af73e5..5a4a32887c 100644
--- a/src/python/grpcio_tests/tests/unit/_exit_test.py
+++ b/src/python/grpcio_tests/tests/unit/_exit_test.py
@@ -84,6 +84,7 @@ def wait(process):
process.wait()
+@unittest.skip('https://github.com/grpc/grpc/issues/7311')
class ExitTest(unittest.TestCase):
def test_unstarted_server(self):
diff --git a/src/python/grpcio_tests/tests/unit/_junkdrawer/math_pb2.py b/src/python/grpcio_tests/tests/unit/_junkdrawer/math_pb2.py
deleted file mode 100644
index 20165955b4..0000000000
--- a/src/python/grpcio_tests/tests/unit/_junkdrawer/math_pb2.py
+++ /dev/null
@@ -1,266 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# TODO(nathaniel): Remove this from source control after having made
-# generation from the math.proto source part of GRPC's build-and-test
-# process.
-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: math.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='math.proto',
- package='math',
- serialized_pb=_b('\n\nmath.proto\x12\x04math\",\n\x07\x44ivArgs\x12\x10\n\x08\x64ividend\x18\x01 \x02(\x03\x12\x0f\n\x07\x64ivisor\x18\x02 \x02(\x03\"/\n\x08\x44ivReply\x12\x10\n\x08quotient\x18\x01 \x02(\x03\x12\x11\n\tremainder\x18\x02 \x02(\x03\"\x18\n\x07\x46ibArgs\x12\r\n\x05limit\x18\x01 \x01(\x03\"\x12\n\x03Num\x12\x0b\n\x03num\x18\x01 \x02(\x03\"\x19\n\x08\x46ibReply\x12\r\n\x05\x63ount\x18\x01 \x02(\x03\x32\xa4\x01\n\x04Math\x12&\n\x03\x44iv\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00\x12.\n\x07\x44ivMany\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00(\x01\x30\x01\x12#\n\x03\x46ib\x12\r.math.FibArgs\x1a\t.math.Num\"\x00\x30\x01\x12\x1f\n\x03Sum\x12\t.math.Num\x1a\t.math.Num\"\x00(\x01')
-)
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-
-
-
-_DIVARGS = _descriptor.Descriptor(
- name='DivArgs',
- full_name='math.DivArgs',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='dividend', full_name='math.DivArgs.dividend', index=0,
- number=1, type=3, cpp_type=2, label=2,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='divisor', full_name='math.DivArgs.divisor', index=1,
- number=2, type=3, cpp_type=2, label=2,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=20,
- serialized_end=64,
-)
-
-
-_DIVREPLY = _descriptor.Descriptor(
- name='DivReply',
- full_name='math.DivReply',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='quotient', full_name='math.DivReply.quotient', index=0,
- number=1, type=3, cpp_type=2, label=2,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='remainder', full_name='math.DivReply.remainder', index=1,
- number=2, type=3, cpp_type=2, label=2,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=66,
- serialized_end=113,
-)
-
-
-_FIBARGS = _descriptor.Descriptor(
- name='FibArgs',
- full_name='math.FibArgs',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='limit', full_name='math.FibArgs.limit', index=0,
- number=1, type=3, cpp_type=2, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=115,
- serialized_end=139,
-)
-
-
-_NUM = _descriptor.Descriptor(
- name='Num',
- full_name='math.Num',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='num', full_name='math.Num.num', index=0,
- number=1, type=3, cpp_type=2, label=2,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=141,
- serialized_end=159,
-)
-
-
-_FIBREPLY = _descriptor.Descriptor(
- name='FibReply',
- full_name='math.FibReply',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='count', full_name='math.FibReply.count', index=0,
- number=1, type=3, cpp_type=2, label=2,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=161,
- serialized_end=186,
-)
-
-DESCRIPTOR.message_types_by_name['DivArgs'] = _DIVARGS
-DESCRIPTOR.message_types_by_name['DivReply'] = _DIVREPLY
-DESCRIPTOR.message_types_by_name['FibArgs'] = _FIBARGS
-DESCRIPTOR.message_types_by_name['Num'] = _NUM
-DESCRIPTOR.message_types_by_name['FibReply'] = _FIBREPLY
-
-DivArgs = _reflection.GeneratedProtocolMessageType('DivArgs', (_message.Message,), dict(
- DESCRIPTOR = _DIVARGS,
- __module__ = 'math_pb2'
- # @@protoc_insertion_point(class_scope:math.DivArgs)
- ))
-_sym_db.RegisterMessage(DivArgs)
-
-DivReply = _reflection.GeneratedProtocolMessageType('DivReply', (_message.Message,), dict(
- DESCRIPTOR = _DIVREPLY,
- __module__ = 'math_pb2'
- # @@protoc_insertion_point(class_scope:math.DivReply)
- ))
-_sym_db.RegisterMessage(DivReply)
-
-FibArgs = _reflection.GeneratedProtocolMessageType('FibArgs', (_message.Message,), dict(
- DESCRIPTOR = _FIBARGS,
- __module__ = 'math_pb2'
- # @@protoc_insertion_point(class_scope:math.FibArgs)
- ))
-_sym_db.RegisterMessage(FibArgs)
-
-Num = _reflection.GeneratedProtocolMessageType('Num', (_message.Message,), dict(
- DESCRIPTOR = _NUM,
- __module__ = 'math_pb2'
- # @@protoc_insertion_point(class_scope:math.Num)
- ))
-_sym_db.RegisterMessage(Num)
-
-FibReply = _reflection.GeneratedProtocolMessageType('FibReply', (_message.Message,), dict(
- DESCRIPTOR = _FIBREPLY,
- __module__ = 'math_pb2'
- # @@protoc_insertion_point(class_scope:math.FibReply)
- ))
-_sym_db.RegisterMessage(FibReply)
-
-
-# @@protoc_insertion_point(module_scope)
diff --git a/src/python/grpcio_tests/tests/unit/_links/__init__.py b/src/python/grpcio_tests/tests/unit/_links/__init__.py
deleted file mode 100644
index 7086519106..0000000000
--- a/src/python/grpcio_tests/tests/unit/_links/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/_links/_proto_scenarios.py b/src/python/grpcio_tests/tests/unit/_links/_proto_scenarios.py
deleted file mode 100644
index 50661085f9..0000000000
--- a/src/python/grpcio_tests/tests/unit/_links/_proto_scenarios.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Test scenarios using protocol buffers."""
-
-import abc
-import threading
-
-import six
-
-from tests.unit._junkdrawer import math_pb2
-from tests.unit.framework.common import test_constants
-
-
-class ProtoScenario(six.with_metaclass(abc.ABCMeta)):
- """An RPC test scenario using protocol buffers."""
-
- @abc.abstractmethod
- def group_and_method(self):
- """Access the test group and method.
-
- Returns:
- The test group and method as a pair.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def serialize_request(self, request):
- """Serialize a request protocol buffer.
-
- Args:
- request: A request protocol buffer.
-
- Returns:
- The bytestring serialization of the given request protocol buffer.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def deserialize_request(self, request_bytestring):
- """Deserialize a request protocol buffer.
-
- Args:
- request_bytestring: The bytestring serialization of a request protocol
- buffer.
-
- Returns:
- The request protocol buffer deserialized from the given byte string.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def serialize_response(self, response):
- """Serialize a response protocol buffer.
-
- Args:
- response: A response protocol buffer.
-
- Returns:
- The bytestring serialization of the given response protocol buffer.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def deserialize_response(self, response_bytestring):
- """Deserialize a response protocol buffer.
-
- Args:
- response_bytestring: The bytestring serialization of a response protocol
- buffer.
-
- Returns:
- The response protocol buffer deserialized from the given byte string.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def requests(self):
- """Access the sequence of requests for this scenario.
-
- Returns:
- A sequence of request protocol buffers.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def response_for_request(self, request):
- """Access the response for a particular request.
-
- Args:
- request: A request protocol buffer.
-
- Returns:
- The response protocol buffer appropriate for the given request.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def verify_requests(self, experimental_requests):
- """Verify the requests transmitted through the system under test.
-
- Args:
- experimental_requests: The request protocol buffers transmitted through
- the system under test.
-
- Returns:
- True if the requests satisfy this test scenario; False otherwise.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def verify_responses(self, experimental_responses):
- """Verify the responses transmitted through the system under test.
-
- Args:
- experimental_responses: The response protocol buffers transmitted through
- the system under test.
-
- Returns:
- True if the responses satisfy this test scenario; False otherwise.
- """
- raise NotImplementedError()
-
-
-class EmptyScenario(ProtoScenario):
- """A scenario that transmits no protocol buffers in either direction."""
-
- def group_and_method(self):
- return 'math.Math', 'DivMany'
-
- def serialize_request(self, request):
- raise ValueError('This should not be necessary to call!')
-
- def deserialize_request(self, request_bytestring):
- raise ValueError('This should not be necessary to call!')
-
- def serialize_response(self, response):
- raise ValueError('This should not be necessary to call!')
-
- def deserialize_response(self, response_bytestring):
- raise ValueError('This should not be necessary to call!')
-
- def requests(self):
- return ()
-
- def response_for_request(self, request):
- raise ValueError('This should not be necessary to call!')
-
- def verify_requests(self, experimental_requests):
- return not experimental_requests
-
- def verify_responses(self, experimental_responses):
- return not experimental_responses
-
-
-class BidirectionallyUnaryScenario(ProtoScenario):
- """A scenario that transmits no protocol buffers in either direction."""
-
- _DIVIDEND = 59
- _DIVISOR = 7
- _QUOTIENT = 8
- _REMAINDER = 3
-
- _REQUEST = math_pb2.DivArgs(dividend=_DIVIDEND, divisor=_DIVISOR)
- _RESPONSE = math_pb2.DivReply(quotient=_QUOTIENT, remainder=_REMAINDER)
-
- def group_and_method(self):
- return 'math.Math', 'Div'
-
- def serialize_request(self, request):
- return request.SerializeToString()
-
- def deserialize_request(self, request_bytestring):
- return math_pb2.DivArgs.FromString(request_bytestring)
-
- def serialize_response(self, response):
- return response.SerializeToString()
-
- def deserialize_response(self, response_bytestring):
- return math_pb2.DivReply.FromString(response_bytestring)
-
- def requests(self):
- return [self._REQUEST]
-
- def response_for_request(self, request):
- return self._RESPONSE
-
- def verify_requests(self, experimental_requests):
- return tuple(experimental_requests) == (self._REQUEST,)
-
- def verify_responses(self, experimental_responses):
- return tuple(experimental_responses) == (self._RESPONSE,)
-
-
-class BidirectionallyStreamingScenario(ProtoScenario):
- """A scenario that transmits no protocol buffers in either direction."""
-
- _REQUESTS = tuple(
- math_pb2.DivArgs(dividend=59 + index, divisor=7 + index)
- for index in range(test_constants.STREAM_LENGTH))
-
- def __init__(self):
- self._lock = threading.Lock()
- self._responses = []
-
- def group_and_method(self):
- return 'math.Math', 'DivMany'
-
- def serialize_request(self, request):
- return request.SerializeToString()
-
- def deserialize_request(self, request_bytestring):
- return math_pb2.DivArgs.FromString(request_bytestring)
-
- def serialize_response(self, response):
- return response.SerializeToString()
-
- def deserialize_response(self, response_bytestring):
- return math_pb2.DivReply.FromString(response_bytestring)
-
- def requests(self):
- return self._REQUESTS
-
- def response_for_request(self, request):
- quotient, remainder = divmod(request.dividend, request.divisor)
- response = math_pb2.DivReply(quotient=quotient, remainder=remainder)
- with self._lock:
- self._responses.append(response)
- return response
-
- def verify_requests(self, experimental_requests):
- return tuple(experimental_requests) == self._REQUESTS
-
- def verify_responses(self, experimental_responses):
- with self._lock:
- return tuple(experimental_responses) == tuple(self._responses)
diff --git a/src/python/grpcio_tests/tests/unit/_rpc_test.py b/src/python/grpcio_tests/tests/unit/_rpc_test.py
index 59bf240d28..ab6546bf87 100644
--- a/src/python/grpcio_tests/tests/unit/_rpc_test.py
+++ b/src/python/grpcio_tests/tests/unit/_rpc_test.py
@@ -233,7 +233,11 @@ class RPCTest(unittest.TestCase):
('test', 'SuccessfulUnaryRequestFutureUnaryResponse'),))
response = response_future.result()
+ self.assertIsInstance(response_future, grpc.Future)
+ self.assertIsInstance(response_future, grpc.Call)
self.assertEqual(expected_response, response)
+ self.assertIsNone(response_future.exception())
+ self.assertIsNone(response_future.traceback())
def testSuccessfulUnaryRequestStreamResponse(self):
request = b'\x37\x58'
@@ -287,6 +291,8 @@ class RPCTest(unittest.TestCase):
response = response_future.result()
self.assertEqual(expected_response, response)
+ self.assertIsNone(response_future.exception())
+ self.assertIsNone(response_future.traceback())
def testSuccessfulStreamRequestStreamResponse(self):
requests = tuple(b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH))
@@ -459,6 +465,10 @@ class RPCTest(unittest.TestCase):
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.exception()
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.traceback()
self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
def testCancelledUnaryRequestStreamResponse(self):
@@ -495,6 +505,10 @@ class RPCTest(unittest.TestCase):
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.exception()
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.traceback()
self.assertIsNotNone(response_future.initial_metadata())
self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
self.assertIsNotNone(response_future.details())
@@ -528,6 +542,7 @@ class RPCTest(unittest.TestCase):
request, timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test', 'ExpiredUnaryRequestBlockingUnaryResponse'),))
+ self.assertIsInstance(exception_context.exception, grpc.Call)
self.assertIsNotNone(exception_context.exception.initial_metadata())
self.assertIs(
grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
@@ -556,6 +571,7 @@ class RPCTest(unittest.TestCase):
self.assertIs(
grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIsNotNone(response_future.traceback())
self.assertIs(
grpc.StatusCode.DEADLINE_EXCEEDED, response_future.exception().code())
@@ -585,6 +601,8 @@ class RPCTest(unittest.TestCase):
request_iterator, timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test', 'ExpiredStreamRequestBlockingUnaryResponse'),))
+ self.assertIsInstance(exception_context.exception, grpc.RpcError)
+ self.assertIsInstance(exception_context.exception, grpc.Call)
self.assertIsNotNone(exception_context.exception.initial_metadata())
self.assertIs(
grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
@@ -601,6 +619,8 @@ class RPCTest(unittest.TestCase):
response_future = multi_callable.future(
request_iterator, timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test', 'ExpiredStreamRequestFutureUnaryResponse'),))
+ with self.assertRaises(grpc.FutureTimeoutError):
+ response_future.result(timeout=test_constants.SHORT_TIMEOUT / 2.0)
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
@@ -610,6 +630,7 @@ class RPCTest(unittest.TestCase):
self.assertIs(
grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIsNotNone(response_future.traceback())
self.assertIs(response_future, value_passed_to_callback)
self.assertIsNotNone(response_future.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
@@ -656,11 +677,14 @@ class RPCTest(unittest.TestCase):
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
+ self.assertIsInstance(response_future, grpc.Future)
+ self.assertIsInstance(response_future, grpc.Call)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(
grpc.StatusCode.UNKNOWN, exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIsNotNone(response_future.traceback())
self.assertIs(grpc.StatusCode.UNKNOWN, response_future.exception().code())
self.assertIs(response_future, value_passed_to_callback)
@@ -709,6 +733,7 @@ class RPCTest(unittest.TestCase):
self.assertIs(
grpc.StatusCode.UNKNOWN, exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIsNotNone(response_future.traceback())
self.assertIs(response_future, value_passed_to_callback)
def testFailedStreamRequestStreamResponse(self):
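The assertions added to _rpc_test.py above check that the object returned by a unary-unary future invocation is both a grpc.Future and a grpc.Call, and that exception() and traceback() return None once the RPC has succeeded. A minimal self-contained sketch of that behavior, assuming the GA grpcio API; the '/test/Echo' method name, the _echo behavior, and the _GenericHandler class are illustrative only:

    from concurrent import futures

    import grpc

    def _echo(request, servicer_context):
        # Trivial unary-unary behavior: return the request bytes unchanged.
        return request

    class _GenericHandler(grpc.GenericRpcHandler):
        def service(self, handler_call_details):
            return grpc.unary_unary_rpc_method_handler(_echo)

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
    port = server.add_insecure_port('[::]:0')
    server.add_generic_rpc_handlers((_GenericHandler(),))
    server.start()

    channel = grpc.insecure_channel('localhost:%d' % port)
    multi_callable = channel.unary_unary('/test/Echo')
    response_future = multi_callable.future(b'\x37\x58')

    assert isinstance(response_future, grpc.Future)
    assert isinstance(response_future, grpc.Call)
    assert response_future.result() == b'\x37\x58'
    assert response_future.exception() is None   # no error after success
    assert response_future.traceback() is None   # no traceback after success
    assert response_future.code() is grpc.StatusCode.OK

    server.stop(None)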
diff --git a/src/python/grpcio_tests/tests/unit/framework/core/__init__.py b/src/python/grpcio_tests/tests/unit/framework/core/__init__.py
deleted file mode 100644
index 7086519106..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/core/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/__init__.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/base/__init__.py
deleted file mode 100644
index 7086519106..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_control.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_control.py
deleted file mode 100644
index 0eb38abf22..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_control.py
+++ /dev/null
@@ -1,570 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Part of the tests of the base interface of RPC Framework."""
-
-from __future__ import division
-
-import abc
-import collections
-import enum
-import random # pylint: disable=unused-import
-import threading
-import time
-
-import six
-
-from grpc.framework.interfaces.base import base
-from tests.unit.framework.common import test_constants
-from tests.unit.framework.interfaces.base import _sequence
-from tests.unit.framework.interfaces.base import _state
-from tests.unit.framework.interfaces.base import test_interfaces # pylint: disable=unused-import
-
-_GROUP = 'base test cases test group'
-_METHOD = 'base test cases test method'
-
-_PAYLOAD_RANDOM_SECTION_MAXIMUM_SIZE = test_constants.PAYLOAD_SIZE // 20
-_MINIMUM_PAYLOAD_SIZE = test_constants.PAYLOAD_SIZE // 600
-
-
-def _create_payload(randomness):
- length = randomness.randint(
- _MINIMUM_PAYLOAD_SIZE, test_constants.PAYLOAD_SIZE)
- random_section_length = randomness.randint(
- 0, min(_PAYLOAD_RANDOM_SECTION_MAXIMUM_SIZE, length))
- random_section = bytes(
- bytearray(
- randomness.getrandbits(8) for _ in range(random_section_length)))
- sevens_section = b'\x07' * (length - random_section_length)
- return b''.join(randomness.sample((random_section, sevens_section), 2))
-
-
-def _anything_in_flight(state):
- return (
- state.invocation_initial_metadata_in_flight is not None or
- state.invocation_payloads_in_flight or
- state.invocation_completion_in_flight is not None or
- state.service_initial_metadata_in_flight is not None or
- state.service_payloads_in_flight or
- state.service_completion_in_flight is not None or
- 0 < state.invocation_allowance_in_flight or
- 0 < state.service_allowance_in_flight
- )
-
-
-def _verify_service_advance_and_update_state(
- initial_metadata, payload, completion, allowance, state, implementation):
- if initial_metadata is not None:
- if state.invocation_initial_metadata_received:
- return 'Later invocation initial metadata received: %s' % (
- initial_metadata,)
- if state.invocation_payloads_received:
- return 'Invocation initial metadata received after payloads: %s' % (
- state.invocation_payloads_received)
- if state.invocation_completion_received:
- return 'Invocation initial metadata received after invocation completion!'
- if not implementation.metadata_transmitted(
- state.invocation_initial_metadata_in_flight, initial_metadata):
- return 'Invocation initial metadata maltransmitted: %s, %s' % (
- state.invocation_initial_metadata_in_flight, initial_metadata)
- else:
- state.invocation_initial_metadata_in_flight = None
- state.invocation_initial_metadata_received = True
-
- if payload is not None:
- if state.invocation_completion_received:
- return 'Invocation payload received after invocation completion!'
- elif not state.invocation_payloads_in_flight:
- return 'Invocation payload "%s" received but not in flight!' % (payload,)
- elif state.invocation_payloads_in_flight[0] != payload:
- return 'Invocation payload mismatch: %s, %s' % (
- state.invocation_payloads_in_flight[0], payload)
- elif state.service_side_invocation_allowance < 1:
- return 'Disallowed invocation payload!'
- else:
- state.invocation_payloads_in_flight.pop(0)
- state.invocation_payloads_received += 1
- state.service_side_invocation_allowance -= 1
-
- if completion is not None:
- if state.invocation_completion_received:
- return 'Later invocation completion received: %s' % (completion,)
- elif not implementation.completion_transmitted(
- state.invocation_completion_in_flight, completion):
- return 'Invocation completion maltransmitted: %s, %s' % (
- state.invocation_completion_in_flight, completion)
- else:
- state.invocation_completion_in_flight = None
- state.invocation_completion_received = True
-
- if allowance is not None:
- if allowance <= 0:
- return 'Illegal allowance value: %s' % (allowance,)
- else:
- state.service_allowance_in_flight -= allowance
- state.service_side_service_allowance += allowance
-
-
-def _verify_invocation_advance_and_update_state(
- initial_metadata, payload, completion, allowance, state, implementation):
- if initial_metadata is not None:
- if state.service_initial_metadata_received:
- return 'Later service initial metadata received: %s' % (initial_metadata,)
- if state.service_payloads_received:
- return 'Service initial metadata received after service payloads: %s' % (
- state.service_payloads_received)
- if state.service_completion_received:
- return 'Service initial metadata received after service completion!'
- if not implementation.metadata_transmitted(
- state.service_initial_metadata_in_flight, initial_metadata):
- return 'Service initial metadata maltransmitted: %s, %s' % (
- state.service_initial_metadata_in_flight, initial_metadata)
- else:
- state.service_initial_metadata_in_flight = None
- state.service_initial_metadata_received = True
-
- if payload is not None:
- if state.service_completion_received:
- return 'Service payload received after service completion!'
- elif not state.service_payloads_in_flight:
- return 'Service payload "%s" received but not in flight!' % (payload,)
- elif state.service_payloads_in_flight[0] != payload:
- return 'Service payload mismatch: %s, %s' % (
- state.invocation_payloads_in_flight[0], payload)
- elif state.invocation_side_service_allowance < 1:
- return 'Disallowed service payload!'
- else:
- state.service_payloads_in_flight.pop(0)
- state.service_payloads_received += 1
- state.invocation_side_service_allowance -= 1
-
- if completion is not None:
- if state.service_completion_received:
- return 'Later service completion received: %s' % (completion,)
- elif not implementation.completion_transmitted(
- state.service_completion_in_flight, completion):
- return 'Service completion maltransmitted: %s, %s' % (
- state.service_completion_in_flight, completion)
- else:
- state.service_completion_in_flight = None
- state.service_completion_received = True
-
- if allowance is not None:
- if allowance <= 0:
- return 'Illegal allowance value: %s' % (allowance,)
- else:
- state.invocation_allowance_in_flight -= allowance
- state.invocation_side_service_allowance += allowance
-
-
-class Invocation(
- collections.namedtuple(
- 'Invocation',
- ('group', 'method', 'subscription_kind', 'timeout', 'initial_metadata',
- 'payload', 'completion',))):
- """A description of operation invocation.
-
- Attributes:
- group: The group identifier for the operation.
- method: The method identifier for the operation.
- subscription_kind: A base.Subscription.Kind value describing the kind of
- subscription to use for the operation.
- timeout: A duration in seconds to pass as the timeout value for the
- operation.
- initial_metadata: An object to pass as the initial metadata for the
- operation or None.
- payload: An object to pass as a payload value for the operation or None.
- completion: An object to pass as a completion value for the operation or
- None.
- """
-
-
-class OnAdvance(
- collections.namedtuple(
- 'OnAdvance',
- ('kind', 'initial_metadata', 'payload', 'completion', 'allowance'))):
- """Describes action to be taken in a test in response to an advance call.
-
- Attributes:
- kind: A Kind value describing the overall kind of response.
- initial_metadata: An initial metadata value to pass to a call of the advance
- method of the operator under test. Only valid if kind is Kind.ADVANCE and
- may be None.
- payload: A payload value to pass to a call of the advance method of the
- operator under test. Only valid if kind is Kind.ADVANCE and may be None.
- completion: A base.Completion value to pass to a call of the advance method
- of the operator under test. Only valid if kind is Kind.ADVANCE and may be
- None.
- allowance: An allowance value to pass to a call of the advance method of the
- operator under test. Only valid if kind is Kind.ADVANCE and may be None.
- """
-
- @enum.unique
- class Kind(enum.Enum):
- ADVANCE = 'advance'
- DEFECT = 'defect'
- IDLE = 'idle'
-
-
-_DEFECT_ON_ADVANCE = OnAdvance(OnAdvance.Kind.DEFECT, None, None, None, None)
-_IDLE_ON_ADVANCE = OnAdvance(OnAdvance.Kind.IDLE, None, None, None, None)
-
-
-class Instruction(
- collections.namedtuple(
- 'Instruction',
- ('kind', 'advance_args', 'advance_kwargs', 'conclude_success',
- 'conclude_message', 'conclude_invocation_outcome_kind',
- 'conclude_service_outcome_kind',))):
- """"""
-
- @enum.unique
- class Kind(enum.Enum):
- ADVANCE = 'ADVANCE'
- CANCEL = 'CANCEL'
- CONCLUDE = 'CONCLUDE'
-
-
-class Controller(six.with_metaclass(abc.ABCMeta)):
-
- @abc.abstractmethod
- def failed(self, message):
- """"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def serialize_request(self, request):
- """"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def deserialize_request(self, serialized_request):
- """"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def serialize_response(self, response):
- """"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def deserialize_response(self, serialized_response):
- """"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def invocation(self):
- """"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def poll(self):
- """"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def on_service_advance(
- self, initial_metadata, payload, completion, allowance):
- """"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def on_invocation_advance(
- self, initial_metadata, payload, completion, allowance):
- """"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def service_on_termination(self, outcome):
- """"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def invocation_on_termination(self, outcome):
- """"""
- raise NotImplementedError()
-
-
-class ControllerCreator(six.with_metaclass(abc.ABCMeta)):
-
- @abc.abstractmethod
- def name(self):
- """"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def controller(self, implementation, randomness):
- """"""
- raise NotImplementedError()
-
-
-class _Remainder(
- collections.namedtuple(
- '_Remainder',
- ('invocation_payloads', 'service_payloads', 'invocation_completion',
- 'service_completion',))):
- """Describes work remaining to be done in a portion of a test.
-
- Attributes:
- invocation_payloads: The number of payloads to be sent from the invocation
- side of the operation to the service side of the operation.
- service_payloads: The number of payloads to be sent from the service side of
- the operation to the invocation side of the operation.
- invocation_completion: Whether or not completion from the invocation side of
- the operation should be indicated and has yet to be indicated.
- service_completion: Whether or not completion from the service side of the
- operation should be indicated and has yet to be indicated.
- """
-
-
-class _SequenceController(Controller):
-
- def __init__(self, sequence, implementation, randomness):
- """Constructor.
-
- Args:
- sequence: A _sequence.Sequence describing the steps to be taken in the
- test at a relatively high level.
- implementation: A test_interfaces.Implementation encapsulating the
- base interface implementation that is the system under test.
- randomness: A random.Random instance for use in the test.
- """
- self._condition = threading.Condition()
- self._sequence = sequence
- self._implementation = implementation
- self._randomness = randomness
-
- self._until = None
- self._remaining_elements = None
- self._poll_next = None
- self._message = None
-
- self._state = _state.OperationState()
- self._todo = None
-
- # called with self._condition
- def _failed(self, message):
- self._message = message
- self._condition.notify_all()
-
- def _passed(self, invocation_outcome, service_outcome):
- self._poll_next = Instruction(
- Instruction.Kind.CONCLUDE, None, None, True, None, invocation_outcome,
- service_outcome)
- self._condition.notify_all()
-
- def failed(self, message):
- with self._condition:
- self._failed(message)
-
- def serialize_request(self, request):
- return request + request
-
- def deserialize_request(self, serialized_request):
- return serialized_request[:len(serialized_request) // 2]
-
- def serialize_response(self, response):
- return response * 3
-
- def deserialize_response(self, serialized_response):
- return serialized_response[2 * len(serialized_response) // 3:]
-
- def invocation(self):
- with self._condition:
- self._until = time.time() + self._sequence.maximum_duration
- self._remaining_elements = list(self._sequence.elements)
- if self._sequence.invocation.initial_metadata:
- initial_metadata = self._implementation.invocation_initial_metadata()
- self._state.invocation_initial_metadata_in_flight = initial_metadata
- else:
- initial_metadata = None
- if self._sequence.invocation.payload:
- payload = _create_payload(self._randomness)
- self._state.invocation_payloads_in_flight.append(payload)
- else:
- payload = None
- if self._sequence.invocation.complete:
- completion = self._implementation.invocation_completion()
- self._state.invocation_completion_in_flight = completion
- else:
- completion = None
- return Invocation(
- _GROUP, _METHOD, base.Subscription.Kind.FULL,
- self._sequence.invocation.timeout, initial_metadata, payload,
- completion)
-
- def poll(self):
- with self._condition:
- while True:
- if self._message is not None:
- return Instruction(
- Instruction.Kind.CONCLUDE, None, None, False, self._message, None,
- None)
- elif self._poll_next:
- poll_next = self._poll_next
- self._poll_next = None
- return poll_next
- elif self._until < time.time():
- return Instruction(
- Instruction.Kind.CONCLUDE, None, None, False,
- 'overran allotted time!', None, None)
- else:
- self._condition.wait(timeout=self._until-time.time())
-
- def on_service_advance(
- self, initial_metadata, payload, completion, allowance):
- with self._condition:
- message = _verify_service_advance_and_update_state(
- initial_metadata, payload, completion, allowance, self._state,
- self._implementation)
- if message is not None:
- self._failed(message)
- if self._todo is not None:
- raise ValueError('TODO!!!')
- elif _anything_in_flight(self._state):
- return _IDLE_ON_ADVANCE
- elif self._remaining_elements:
- element = self._remaining_elements.pop(0)
- if element.kind is _sequence.Element.Kind.SERVICE_TRANSMISSION:
- if element.transmission.initial_metadata:
- initial_metadata = self._implementation.service_initial_metadata()
- self._state.service_initial_metadata_in_flight = initial_metadata
- else:
- initial_metadata = None
- if element.transmission.payload:
- payload = _create_payload(self._randomness)
- self._state.service_payloads_in_flight.append(payload)
- self._state.service_side_service_allowance -= 1
- else:
- payload = None
- if element.transmission.complete:
- completion = self._implementation.service_completion()
- self._state.service_completion_in_flight = completion
- else:
- completion = None
- if (not self._state.invocation_completion_received and
- 0 <= self._state.service_side_invocation_allowance):
- allowance = 1
- self._state.service_side_invocation_allowance += 1
- self._state.invocation_allowance_in_flight += 1
- else:
- allowance = None
- return OnAdvance(
- OnAdvance.Kind.ADVANCE, initial_metadata, payload, completion,
- allowance)
- else:
- raise ValueError('TODO!!!')
- else:
- return _IDLE_ON_ADVANCE
-
- def on_invocation_advance(
- self, initial_metadata, payload, completion, allowance):
- with self._condition:
- message = _verify_invocation_advance_and_update_state(
- initial_metadata, payload, completion, allowance, self._state,
- self._implementation)
- if message is not None:
- self._failed(message)
- if self._todo is not None:
- raise ValueError('TODO!!!')
- elif _anything_in_flight(self._state):
- return _IDLE_ON_ADVANCE
- elif self._remaining_elements:
- element = self._remaining_elements.pop(0)
- if element.kind is _sequence.Element.Kind.INVOCATION_TRANSMISSION:
- if element.transmission.initial_metadata:
- initial_metadata = self._implementation.invocation_initial_metadata()
- self._state.invocation_initial_metadata_in_flight = initial_metadata
- else:
- initial_metadata = None
- if element.transmission.payload:
- payload = _create_payload(self._randomness)
- self._state.invocation_payloads_in_flight.append(payload)
- self._state.invocation_side_invocation_allowance -= 1
- else:
- payload = None
- if element.transmission.complete:
- completion = self._implementation.invocation_completion()
- self._state.invocation_completion_in_flight = completion
- else:
- completion = None
- if (not self._state.service_completion_received and
- 0 <= self._state.invocation_side_service_allowance):
- allowance = 1
- self._state.invocation_side_service_allowance += 1
- self._state.service_allowance_in_flight += 1
- else:
- allowance = None
- return OnAdvance(
- OnAdvance.Kind.ADVANCE, initial_metadata, payload, completion,
- allowance)
- else:
- raise ValueError('TODO!!!')
- else:
- return _IDLE_ON_ADVANCE
-
- def service_on_termination(self, outcome):
- with self._condition:
- self._state.service_side_outcome = outcome
- if self._todo is not None or self._remaining_elements:
- self._failed('Premature service-side outcome %s!' % (outcome,))
- elif outcome.kind is not self._sequence.outcome_kinds.service:
- self._failed(
- 'Incorrect service-side outcome kind: %s should have been %s' % (
- outcome.kind, self._sequence.outcome_kinds.service))
- elif self._state.invocation_side_outcome is not None:
- self._passed(self._state.invocation_side_outcome.kind, outcome.kind)
-
- def invocation_on_termination(self, outcome):
- with self._condition:
- self._state.invocation_side_outcome = outcome
- if self._todo is not None or self._remaining_elements:
- self._failed('Premature invocation-side outcome %s!' % (outcome,))
- elif outcome.kind is not self._sequence.outcome_kinds.invocation:
- self._failed(
- 'Incorrect invocation-side outcome kind: %s should have been %s' % (
- outcome.kind, self._sequence.outcome_kinds.invocation))
- elif self._state.service_side_outcome is not None:
- self._passed(outcome.kind, self._state.service_side_outcome.kind)
-
-
-class _SequenceControllerCreator(ControllerCreator):
-
- def __init__(self, sequence):
- self._sequence = sequence
-
- def name(self):
- return self._sequence.name
-
- def controller(self, implementation, randomness):
- return _SequenceController(self._sequence, implementation, randomness)
-
-
-CONTROLLER_CREATORS = tuple(
- _SequenceControllerCreator(sequence) for sequence in _sequence.SEQUENCES)
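A minimal sketch, not part of the diff, of the round-trip property behind the controller's doubling/tripling serializers above; presumably they are chosen so payloads cannot cross the system under test without real serialization, yet deserialization still recovers the original bytestring.

    # Sketch only: mirrors the serializer bodies defined in the controller above.
    def serialize_request(request):
      return request + request  # doubled on the wire

    def deserialize_request(serialized_request):
      return serialized_request[:len(serialized_request) // 2]  # first half

    def serialize_response(response):
      return response * 3  # tripled on the wire

    def deserialize_response(serialized_response):
      return serialized_response[2 * len(serialized_response) // 3:]  # last third

    assert deserialize_request(serialize_request(b'abc')) == b'abc'
    assert deserialize_response(serialize_response(b'xyz')) == b'xyz'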
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_sequence.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_sequence.py
deleted file mode 100644
index 571d0e1e63..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_sequence.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Part of the tests of the base interface of RPC Framework."""
-
-import collections
-import enum
-
-from grpc.framework.interfaces.base import base
-from tests.unit.framework.common import test_constants
-
-
-class Invocation(
- collections.namedtuple(
- 'Invocation', ('timeout', 'initial_metadata', 'payload', 'complete',))):
- """A recipe for operation invocation.
-
- Attributes:
- timeout: A duration in seconds to pass to the system under test as the
- operation's timeout value.
- initial_metadata: A boolean indicating whether or not to pass initial
- metadata when invoking the operation.
- payload: A boolean indicating whether or not to pass a payload when
- invoking the operation.
- complete: A boolean indicating whether or not to indicate completion of
- transmissions from the invoking side of the operation when invoking the
- operation.
- """
-
-
-class Transmission(
- collections.namedtuple(
- 'Transmission', ('initial_metadata', 'payload', 'complete',))):
- """A recipe for a single transmission in an operation.
-
- Attributes:
- initial_metadata: A boolean indicating whether or not to pass initial
- metadata as part of the transmission.
- payload: A boolean indicating whether or not to pass a payload as part of
- the transmission.
- complete: A boolean indicating whether or not to indicate completion of
- transmission from the transmitting side of the operation as part of the
- transmission.
- """
-
-
-class Intertransmission(
- collections.namedtuple('Intertransmission', ('invocation', 'service',))):
- """A recipe for multiple transmissions in an operation.
-
- Attributes:
- invocation: An integer describing the number of payloads to send from the
- invocation side of the operation to the service side.
- service: An integer describing the number of payloads to send from the
- service side of the operation to the invocation side.
- """
-
-
-class Element(collections.namedtuple('Element', ('kind', 'transmission',))):
- """A sum type for steps to perform when testing an operation.
-
- Attributes:
- kind: A Kind value describing the kind of step to perform in the test.
- transmission: Only valid for kinds Kind.INVOCATION_TRANSMISSION and
- Kind.SERVICE_TRANSMISSION, a Transmission value describing the details of
- the transmission to be made.
- """
-
- @enum.unique
- class Kind(enum.Enum):
- INVOCATION_TRANSMISSION = 'invocation transmission'
- SERVICE_TRANSMISSION = 'service transmission'
- INTERTRANSMISSION = 'intertransmission'
- INVOCATION_CANCEL = 'invocation cancel'
- SERVICE_CANCEL = 'service cancel'
- INVOCATION_FAILURE = 'invocation failure'
- SERVICE_FAILURE = 'service failure'
-
-
-class OutcomeKinds(
- collections.namedtuple('OutcomeKinds', ('invocation', 'service',))):
- """A description of the expected outcome of an operation test.
-
- Attributes:
- invocation: The base.Outcome.Kind value expected on the invocation side of
- the operation.
- service: The base.Outcome.Kind value expected on the service side of the
- operation.
- """
-
-
-class Sequence(
- collections.namedtuple(
- 'Sequence',
- ('name', 'maximum_duration', 'invocation', 'elements',
- 'outcome_kinds',))):
- """Describes at a high level steps to perform in a test.
-
- Attributes:
- name: The string name of the sequence.
- maximum_duration: A length of time in seconds to allow for the test before
- declaring it to have failed.
- invocation: An Invocation value describing how to invoke the operation
- under test.
- elements: A sequence of Element values describing at coarse granularity
- actions to take during the operation under test.
- outcome_kinds: An OutcomeKinds value describing the expected outcome kinds
- of the test.
- """
-
-_EASY = Sequence(
- 'Easy',
- test_constants.TIME_ALLOWANCE,
- Invocation(test_constants.LONG_TIMEOUT, True, True, True),
- (
- Element(
- Element.Kind.SERVICE_TRANSMISSION, Transmission(True, True, True)),
- ),
- OutcomeKinds(base.Outcome.Kind.COMPLETED, base.Outcome.Kind.COMPLETED))
-
-_PEASY = Sequence(
- 'Peasy',
- test_constants.TIME_ALLOWANCE,
- Invocation(test_constants.LONG_TIMEOUT, True, True, False),
- (
- Element(
- Element.Kind.SERVICE_TRANSMISSION, Transmission(True, True, False)),
- Element(
- Element.Kind.INVOCATION_TRANSMISSION,
- Transmission(False, True, True)),
- Element(
- Element.Kind.SERVICE_TRANSMISSION, Transmission(False, True, True)),
- ),
- OutcomeKinds(base.Outcome.Kind.COMPLETED, base.Outcome.Kind.COMPLETED))
-
-
-# TODO(issue 2959): Finish this test suite. This tuple of sequences should
-# contain at least the values in the Cartesian product of (half-duplex,
-# full-duplex) * (zero payloads, one payload, test_constants.STREAM_LENGTH
-# payloads) * (completion, cancellation, expiration, programming defect in
-# servicer code).
-SEQUENCES = (
- _EASY,
- _PEASY,
-)
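Purely as a sketch of what the TODO above asks for, and not part of the diff, one additional Sequence entry might have looked roughly like the following; base.Outcome.Kind.CANCELLED is assumed here, and the controller in _control.py still raises ValueError('TODO!!!') for non-transmission elements, so such an entry could not yet run.

    # Hypothetical sketch of a cancellation sequence per the TODO; the name and
    # the outcome kinds are assumptions, not taken from this diff.
    _CANCELLED_AFTER_ONE_RESPONSE = Sequence(
        'CancelledAfterOneResponse',
        test_constants.TIME_ALLOWANCE,
        Invocation(test_constants.LONG_TIMEOUT, True, True, False),
        (
            Element(
                Element.Kind.SERVICE_TRANSMISSION, Transmission(True, True, False)),
            Element(Element.Kind.INVOCATION_CANCEL, None),
        ),
        OutcomeKinds(base.Outcome.Kind.CANCELLED, base.Outcome.Kind.CANCELLED))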
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_state.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_state.py
deleted file mode 100644
index 21cf33aeb6..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_state.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Part of the tests of the base interface of RPC Framework."""
-
-
-class OperationState(object):
-
- def __init__(self):
- self.invocation_initial_metadata_in_flight = None
- self.invocation_initial_metadata_received = False
- self.invocation_payloads_in_flight = []
- self.invocation_payloads_received = 0
- self.invocation_completion_in_flight = None
- self.invocation_completion_received = False
- self.service_initial_metadata_in_flight = None
- self.service_initial_metadata_received = False
- self.service_payloads_in_flight = []
- self.service_payloads_received = 0
- self.service_completion_in_flight = None
- self.service_completion_received = False
- self.invocation_side_invocation_allowance = 1
- self.invocation_side_service_allowance = 1
- self.service_side_invocation_allowance = 1
- self.service_side_service_allowance = 1
- self.invocation_allowance_in_flight = 0
- self.service_allowance_in_flight = 0
- self.invocation_side_outcome = None
- self.service_side_outcome = None
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_cases.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_cases.py
deleted file mode 100644
index 5d16bf98be..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_cases.py
+++ /dev/null
@@ -1,279 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Tests of the base interface of RPC Framework."""
-
-from __future__ import division
-
-import logging
-import random
-import threading
-import time
-import unittest
-
-from grpc.framework.foundation import logging_pool
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.base import utilities
-from tests.unit.framework.common import test_constants
-from tests.unit.framework.interfaces.base import _control
-from tests.unit.framework.interfaces.base import test_interfaces
-
-_SYNCHRONICITY_VARIATION = (('Sync', False), ('Async', True))
-
-_EMPTY_OUTCOME_KIND_DICT = {
- outcome_kind: 0 for outcome_kind in base.Outcome.Kind}
-
-
-class _Serialization(test_interfaces.Serialization):
-
- def serialize_request(self, request):
- return request + request
-
- def deserialize_request(self, serialized_request):
- return serialized_request[:len(serialized_request) // 2]
-
- def serialize_response(self, response):
- return response * 3
-
- def deserialize_response(self, serialized_response):
- return serialized_response[2 * len(serialized_response) // 3:]
-
-
-def _advance(quadruples, operator, controller):
- try:
- for quadruple in quadruples:
- operator.advance(
- initial_metadata=quadruple[0], payload=quadruple[1],
- completion=quadruple[2], allowance=quadruple[3])
- except Exception as e: # pylint: disable=broad-except
- controller.failed('Exception on advance: %s' % e)
-
-
-class _Operator(base.Operator):
-
- def __init__(self, controller, on_advance, pool, operator_under_test):
- self._condition = threading.Condition()
- self._controller = controller
- self._on_advance = on_advance
- self._pool = pool
- self._operator_under_test = operator_under_test
- self._pending_advances = []
-
- def set_operator_under_test(self, operator_under_test):
- with self._condition:
- self._operator_under_test = operator_under_test
- pent_advances = self._pending_advances
- self._pending_advances = []
- pool = self._pool
- controller = self._controller
-
- if pool is None:
- _advance(pent_advances, operator_under_test, controller)
- else:
- pool.submit(_advance, pent_advances, operator_under_test, controller)
-
- def advance(
- self, initial_metadata=None, payload=None, completion=None,
- allowance=None):
- on_advance = self._on_advance(
- initial_metadata, payload, completion, allowance)
- if on_advance.kind is _control.OnAdvance.Kind.ADVANCE:
- with self._condition:
- pool = self._pool
- operator_under_test = self._operator_under_test
- controller = self._controller
-
- quadruple = (
- on_advance.initial_metadata, on_advance.payload,
- on_advance.completion, on_advance.allowance)
- if pool is None:
- _advance((quadruple,), operator_under_test, controller)
- else:
- pool.submit(_advance, (quadruple,), operator_under_test, controller)
- elif on_advance.kind is _control.OnAdvance.Kind.DEFECT:
- raise ValueError(
- 'Deliberately raised exception from Operator.advance (in a test)!')
-
-
-class _ProtocolReceiver(base.ProtocolReceiver):
-
- def __init__(self):
- self._condition = threading.Condition()
- self._contexts = []
-
- def context(self, protocol_context):
- with self._condition:
- self._contexts.append(protocol_context)
-
-
-class _Servicer(base.Servicer):
- """A base.Servicer with instrumented for testing."""
-
- def __init__(self, group, method, controllers, pool):
- self._condition = threading.Condition()
- self._group = group
- self._method = method
- self._pool = pool
- self._controllers = list(controllers)
-
- def service(self, group, method, context, output_operator):
- with self._condition:
- controller = self._controllers.pop(0)
- if group != self._group or method != self._method:
- controller.failed(
- '%s != %s or %s != %s' % (group, self._group, method, self._method))
- raise base.NoSuchMethodError(None, None)
- else:
- operator = _Operator(
- controller, controller.on_service_advance, self._pool,
- output_operator)
- outcome = context.add_termination_callback(
- controller.service_on_termination)
- if outcome is not None:
- controller.service_on_termination(outcome)
- return utilities.full_subscription(operator, _ProtocolReceiver())
-
-
-class _OperationTest(unittest.TestCase):
-
- def setUp(self):
- if self._synchronicity_variation:
- self._pool = logging_pool.pool(test_constants.POOL_SIZE)
- else:
- self._pool = None
- self._controller = self._controller_creator.controller(
- self._implementation, self._randomness)
-
- def tearDown(self):
- if self._synchronicity_variation:
- self._pool.shutdown(wait=True)
- else:
- self._pool = None
-
- def test_operation(self):
- invocation = self._controller.invocation()
- if invocation.subscription_kind is base.Subscription.Kind.FULL:
- test_operator = _Operator(
- self._controller, self._controller.on_invocation_advance,
- self._pool, None)
- subscription = utilities.full_subscription(
- test_operator, _ProtocolReceiver())
- else:
- # TODO(nathaniel): support and test other subscription kinds.
- self.fail('Non-full subscriptions not yet supported!')
-
- servicer = _Servicer(
- invocation.group, invocation.method, (self._controller,), self._pool)
-
- invocation_end, service_end, memo = self._implementation.instantiate(
- {(invocation.group, invocation.method): _Serialization()}, servicer)
-
- try:
- invocation_end.start()
- service_end.start()
- operation_context, operator_under_test = invocation_end.operate(
- invocation.group, invocation.method, subscription, invocation.timeout,
- initial_metadata=invocation.initial_metadata, payload=invocation.payload,
- completion=invocation.completion)
- test_operator.set_operator_under_test(operator_under_test)
- outcome = operation_context.add_termination_callback(
- self._controller.invocation_on_termination)
- if outcome is not None:
- self._controller.invocation_on_termination(outcome)
- except Exception as e: # pylint: disable=broad-except
- self._controller.failed('Exception on invocation: %s' % e)
- self.fail(e)
-
- while True:
- instruction = self._controller.poll()
- if instruction.kind is _control.Instruction.Kind.ADVANCE:
- try:
- test_operator.advance(
- *instruction.advance_args, **instruction.advance_kwargs)
- except Exception as e: # pylint: disable=broad-except
- self._controller.failed('Exception on instructed advance: %s' % e)
- elif instruction.kind is _control.Instruction.Kind.CANCEL:
- try:
- operation_context.cancel()
- except Exception as e: # pylint: disable=broad-except
- self._controller.failed('Exception on cancel: %s' % e)
- elif instruction.kind is _control.Instruction.Kind.CONCLUDE:
- break
-
- invocation_stop_event = invocation_end.stop(0)
- service_stop_event = service_end.stop(0)
- invocation_stop_event.wait()
- service_stop_event.wait()
- invocation_stats = invocation_end.operation_stats()
- service_stats = service_end.operation_stats()
-
- self._implementation.destantiate(memo)
-
- self.assertTrue(
- instruction.conclude_success, msg=instruction.conclude_message)
-
- expected_invocation_stats = dict(_EMPTY_OUTCOME_KIND_DICT)
- expected_invocation_stats[
- instruction.conclude_invocation_outcome_kind] += 1
- self.assertDictEqual(expected_invocation_stats, invocation_stats)
- expected_service_stats = dict(_EMPTY_OUTCOME_KIND_DICT)
- expected_service_stats[instruction.conclude_service_outcome_kind] += 1
- self.assertDictEqual(expected_service_stats, service_stats)
-
-
-def test_cases(implementation):
- """Creates unittest.TestCase classes for a given Base implementation.
-
- Args:
- implementation: A test_interfaces.Implementation specifying creation and
- destruction of the Base implementation under test.
-
- Returns:
- A sequence of subclasses of unittest.TestCase defining tests of the
- specified Base layer implementation.
- """
- random_seed = hash(time.time())
- logging.warning('Random seed for this execution: %s', random_seed)
- randomness = random.Random(x=random_seed)
-
- test_case_classes = []
- for synchronicity_variation in _SYNCHRONICITY_VARIATION:
- for controller_creator in _control.CONTROLLER_CREATORS:
- name = ''.join(
- (synchronicity_variation[0], controller_creator.name(), 'Test',))
- test_case_classes.append(
- type(name, (_OperationTest,),
- {'_implementation': implementation,
- '_randomness': randomness,
- '_synchronicity_variation': synchronicity_variation[1],
- '_controller_creator': controller_creator,
- '__module__': implementation.__module__,
- }))
-
- return test_case_classes
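For orientation, a sketch with hypothetical names, not from the diff, of how an implementation-specific test module typically consumed this helper: build a test_interfaces.Implementation for the system under test, pass it to test_cases(), and hand the generated classes to unittest.

    import unittest

    from tests.unit.framework.interfaces.base import test_cases

    def load_tests(loader, tests, pattern):  # standard unittest load_tests hook
      # make_implementation() is a hypothetical factory returning a
      # test_interfaces.Implementation for the system under test.
      implementation = make_implementation()
      suite = unittest.TestSuite()
      for test_case_class in test_cases.test_cases(implementation):
        suite.addTests(loader.loadTestsFromTestCase(test_case_class))
      return suite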
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_interfaces.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_interfaces.py
deleted file mode 100644
index 5eba475ba8..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_interfaces.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Interfaces used in tests of implementations of the Base layer."""
-
-import abc
-
-import six
-
-from grpc.framework.interfaces.base import base # pylint: disable=unused-import
-
-
-class Serialization(six.with_metaclass(abc.ABCMeta)):
- """Specifies serialization and deserialization of test payloads."""
-
- def serialize_request(self, request):
- """Serializes a request value used in a test.
-
- Args:
- request: A request value created by a test.
-
- Returns:
- A bytestring that is the serialization of the given request.
- """
- raise NotImplementedError()
-
- def deserialize_request(self, serialized_request):
- """Deserializes a request value used in a test.
-
- Args:
- serialized_request: A bytestring that is the serialization of some request
- used in a test.
-
- Returns:
- The request value encoded by the given bytestring.
- """
- raise NotImplementedError()
-
- def serialize_response(self, response):
- """Serializes a response value used in a test.
-
- Args:
- response: A response value created by a test.
-
- Returns:
- A bytestring that is the serialization of the given response.
- """
- raise NotImplementedError()
-
- def deserialize_response(self, serialized_response):
- """Deserializes a response value used in a test.
-
- Args:
- serialized_response: A bytestring that is the serialization of some
- response used in a test.
-
- Returns:
- The response value encoded by the given bytestring.
- """
- raise NotImplementedError()
-
-
-class Implementation(six.with_metaclass(abc.ABCMeta)):
- """Specifies an implementation of the Base layer."""
-
- @abc.abstractmethod
- def instantiate(self, serializations, servicer):
- """Instantiates the Base layer implementation to be used in a test.
-
- Args:
- serializations: A dict from group-method pair to Serialization object
- specifying how to serialize and deserialize payload values used in the
- test.
- servicer: A base.Servicer object to be called to service RPCs made during
- the test.
-
- Returns:
- A sequence of length three, the first element of which is a
- base.End to be used to invoke RPCs, the second element of which is a
- base.End to be used to service invoked RPCs, and the third element of
- which is an arbitrary memo object to be kept and passed to destantiate
- at the conclusion of the test.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def destantiate(self, memo):
- """Destroys the Base layer implementation under test.
-
- Args:
- memo: The object from the third position of the return value of a call to
- instantiate.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def invocation_initial_metadata(self):
- """Provides an operation's invocation-side initial metadata.
-
- Returns:
- A value to use for an operation's invocation-side initial metadata, or
- None.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def service_initial_metadata(self):
- """Provides an operation's service-side initial metadata.
-
- Returns:
- A value to use for an operation's service-side initial metadata, or
- None.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def invocation_completion(self):
- """Provides an operation's invocation-side completion.
-
- Returns:
- A base.Completion to use for an operation's invocation-side completion.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def service_completion(self):
- """Provides an operation's service-side completion.
-
- Returns:
- A base.Completion to use for an operation's service-side completion.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def metadata_transmitted(self, original_metadata, transmitted_metadata):
- """Identifies whether or not metadata was properly transmitted.
-
- Args:
- original_metadata: A metadata value passed to the system under test.
- transmitted_metadata: The same metadata value after having been
- transmitted through the system under test.
-
- Returns:
- Whether or not the metadata was properly transmitted.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def completion_transmitted(self, original_completion, transmitted_completion):
- """Identifies whether or not a base.Completion was properly transmitted.
-
- Args:
- original_completion: A base.Completion passed to the system under test.
- transmitted_completion: The same completion value after having been
- transmitted through the system under test.
-
- Returns:
- Whether or not the completion was properly transmitted.
- """
- raise NotImplementedError()
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_receiver.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_receiver.py
deleted file mode 100644
index 48f31fc677..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_receiver.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""A utility useful in tests of asynchronous, event-driven interfaces."""
-
-import threading
-
-from grpc.framework.interfaces.face import face
-
-
-class Receiver(face.ResponseReceiver):
- """A utility object useful in tests of asynchronous code."""
-
- def __init__(self):
- self._condition = threading.Condition()
- self._initial_metadata = None
- self._responses = []
- self._terminal_metadata = None
- self._code = None
- self._details = None
- self._completed = False
- self._abortion = None
-
- def abort(self, abortion):
- with self._condition:
- self._abortion = abortion
- self._condition.notify_all()
-
- def initial_metadata(self, initial_metadata):
- with self._condition:
- self._initial_metadata = initial_metadata
-
- def response(self, response):
- with self._condition:
- self._responses.append(response)
-
- def complete(self, terminal_metadata, code, details):
- with self._condition:
- self._terminal_metadata = terminal_metadata
- self._code = code
- self._details = details
- self._completed = True
- self._condition.notify_all()
-
- def block_until_terminated(self):
- with self._condition:
- while self._abortion is None and not self._completed:
- self._condition.wait()
-
- def unary_response(self):
- with self._condition:
- if self._abortion is not None:
- raise AssertionError('Aborted: "{}"!'.format(self._abortion))
- elif len(self._responses) != 1:
- raise AssertionError(
- '%d responses received, not exactly one!' % len(self._responses))
- else:
- return self._responses[0]
-
- def stream_responses(self):
- with self._condition:
- if self._abortion is None:
- return list(self._responses)
- else:
- raise AssertionError('Aborted: "{}"!'.format(self._abortion))
-
- def abortion(self):
- with self._condition:
- return self._abortion
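A sketch of the intended usage pattern for this Receiver; the invocation call is hypothetical and only the Receiver methods come from the code above. Hand the receiver to an event-driven invocation, block until termination, then read the result.

    receiver = Receiver()
    # event_invocation and request stand in for whatever event-style call the
    # test makes; their exact shapes depend on the Face-layer stub under test.
    event_invocation(request, receiver)
    receiver.block_until_terminated()
    if receiver.abortion() is None:
      response = receiver.unary_response()  # or receiver.stream_responses()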
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/links/__init__.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/links/__init__.py
deleted file mode 100644
index 7086519106..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/links/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_cases.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_cases.py
deleted file mode 100644
index 608e64119e..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_cases.py
+++ /dev/null
@@ -1,327 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Tests of the links interface of RPC Framework."""
-
-# unittest is referenced in specifications (docstrings) in this module.
-import abc
-import unittest # pylint: disable=unused-import
-
-import six
-
-from grpc.framework.interfaces.links import links
-from tests.unit.framework.common import test_constants
-from tests.unit.framework.interfaces.links import test_utilities
-
-
-def at_least_n_payloads_received_predicate(n):
- def predicate(ticket_sequence):
- payload_count = 0
- for ticket in ticket_sequence:
- if ticket.payload is not None:
- payload_count += 1
- if n <= payload_count:
- return True
- else:
- return False
- return predicate
-
-
-def terminated(ticket_sequence):
- return ticket_sequence and ticket_sequence[-1].termination is not None
-
-_TRANSMISSION_GROUP = 'test.Group'
-_TRANSMISSION_METHOD = 'TestMethod'
-
-
-class TransmissionTest(six.with_metaclass(abc.ABCMeta)):
- """Tests ticket transmission between two connected links.
-
- This class must be mixed into a unittest.TestCase that implements the abstract
- methods it provides.
- """
-
- # This is a unittest.TestCase mix-in.
- # pylint: disable=invalid-name
-
- @abc.abstractmethod
- def create_transmitting_links(self):
- """Creates two connected links for use in this test.
-
- Returns:
- Two links.Links, the first of which will be used on the invocation side
- of RPCs and the second of which will be used on the service side of
- RPCs.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def destroy_transmitting_links(self, invocation_side_link, service_side_link):
- """Destroys the two connected links created for this test.
-
- Args:
- invocation_side_link: The link used on the invocation side of RPCs in
- this test.
- service_side_link: The link used on the service side of RPCs in this
- test.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def create_invocation_initial_metadata(self):
- """Creates a value for use as invocation-side initial metadata.
-
- Returns:
- A metadata value appropriate for use as invocation-side initial metadata
- or None if invocation-side initial metadata transmission is not
- supported by the links under test.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def create_invocation_terminal_metadata(self):
- """Creates a value for use as invocation-side terminal metadata.
-
- Returns:
- A metadata value appropriate for use as invocation-side terminal
- metadata or None if invocation-side terminal metadata transmission is
- not supported by the links under test.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def create_service_initial_metadata(self):
- """Creates a value for use as service-side initial metadata.
-
- Returns:
- A metadata value appropriate for use as service-side initial metadata or
- None if service-side initial metadata transmission is not supported by
- the links under test.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def create_service_terminal_metadata(self):
- """Creates a value for use as service-side terminal metadata.
-
- Returns:
- A metadata value appropriate for use as service-side terminal metadata or
- None if service-side terminal metadata transmission is not supported by
- the links under test.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def create_invocation_completion(self):
- """Creates values for use as invocation-side code and message.
-
- Returns:
- An invocation-side code value and an invocation-side message value.
- Either or both may be None if invocation-side code and/or
- invocation-side message transmission is not supported by the links
- under test.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def create_service_completion(self):
- """Creates values for use as service-side code and message.
-
- Returns:
- A service-side code value and a service-side message value. Either or
- both may be None if service-side code and/or service-side message
- transmission is not supported by the links under test.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def assertMetadataTransmitted(self, original_metadata, transmitted_metadata):
- """Asserts that transmitted_metadata contains original_metadata.
-
- Args:
- original_metadata: A metadata object used in this test.
- transmitted_metadata: A metadata object obtained after transmission
- through the system under test.
-
- Raises:
- AssertionError: if the transmitted_metadata object does not contain
- original_metadata.
- """
- raise NotImplementedError()
-
- def group_and_method(self):
- """Returns the group and method used in this test case.
-
- Returns:
- A pair of the group and method used in this test case.
- """
- return _TRANSMISSION_GROUP, _TRANSMISSION_METHOD
-
- def serialize_request(self, request):
- """Serializes a request value used in this test case.
-
- Args:
- request: A request value created by this test case.
-
- Returns:
- A bytestring that is the serialization of the given request.
- """
- return request
-
- def deserialize_request(self, serialized_request):
- """Deserializes a request value used in this test case.
-
- Args:
- serialized_request: A bytestring that is the serialization of some request
- used in this test case.
-
- Returns:
- The request value encoded by the given bytestring.
- """
- return serialized_request
-
- def serialize_response(self, response):
- """Serializes a response value used in this test case.
-
- Args:
- response: A response value created by this test case.
-
- Returns:
- A bytestring that is the serialization of the given response.
- """
- return response
-
- def deserialize_response(self, serialized_response):
- """Deserializes a response value used in this test case.
-
- Args:
- serialized_response: A bytestring that is the serialization of some
- response used in this test case.
-
- Returns:
- The response value encoded by the given bytestring.
- """
- return serialized_response
-
- def _assert_is_valid_metadata_payload_sequence(
- self, ticket_sequence, payloads, initial_metadata, terminal_metadata):
- initial_metadata_seen = False
- seen_payloads = []
- terminal_metadata_seen = False
-
- for ticket in ticket_sequence:
- if ticket.initial_metadata is not None:
- self.assertFalse(initial_metadata_seen)
- self.assertFalse(seen_payloads)
- self.assertFalse(terminal_metadata_seen)
- self.assertMetadataTransmitted(initial_metadata, ticket.initial_metadata)
- initial_metadata_seen = True
-
- if ticket.payload is not None:
- self.assertFalse(terminal_metadata_seen)
- seen_payloads.append(ticket.payload)
-
- if ticket.terminal_metadata is not None:
- self.assertFalse(terminal_metadata_seen)
- self.assertMetadataTransmitted(terminal_metadata, ticket.terminal_metadata)
- terminal_metadata_seen = True
- self.assertSequenceEqual(payloads, seen_payloads)
-
- def _assert_is_valid_invocation_sequence(
- self, ticket_sequence, group, method, payloads, initial_metadata,
- terminal_metadata, termination):
- self.assertLess(0, len(ticket_sequence))
- self.assertEqual(group, ticket_sequence[0].group)
- self.assertEqual(method, ticket_sequence[0].method)
- self._assert_is_valid_metadata_payload_sequence(
- ticket_sequence, payloads, initial_metadata, terminal_metadata)
- self.assertIs(termination, ticket_sequence[-1].termination)
-
- def _assert_is_valid_service_sequence(
- self, ticket_sequence, payloads, initial_metadata, terminal_metadata,
- code, message, termination):
- self.assertLess(0, len(ticket_sequence))
- self._assert_is_valid_metadata_payload_sequence(
- ticket_sequence, payloads, initial_metadata, terminal_metadata)
- self.assertEqual(code, ticket_sequence[-1].code)
- self.assertEqual(message, ticket_sequence[-1].message)
- self.assertIs(termination, ticket_sequence[-1].termination)
-
- def setUp(self):
- self._invocation_link, self._service_link = self.create_transmitting_links()
- self._invocation_mate = test_utilities.RecordingLink()
- self._service_mate = test_utilities.RecordingLink()
- self._invocation_link.join_link(self._invocation_mate)
- self._service_link.join_link(self._service_mate)
-
- def tearDown(self):
- self.destroy_transmitting_links(self._invocation_link, self._service_link)
-
- def testSimplestRoundTrip(self):
- """Tests transmission of one ticket in each direction."""
- invocation_operation_id = object()
- invocation_payload = b'\x07' * 1023
- timeout = test_constants.LONG_TIMEOUT
- invocation_initial_metadata = self.create_invocation_initial_metadata()
- invocation_terminal_metadata = self.create_invocation_terminal_metadata()
- invocation_code, invocation_message = self.create_invocation_completion()
- service_payload = b'\x08' * 1025
- service_initial_metadata = self.create_service_initial_metadata()
- service_terminal_metadata = self.create_service_terminal_metadata()
- service_code, service_message = self.create_service_completion()
-
- original_invocation_ticket = links.Ticket(
- invocation_operation_id, 0, _TRANSMISSION_GROUP, _TRANSMISSION_METHOD,
- links.Ticket.Subscription.FULL, timeout, 0, invocation_initial_metadata,
- invocation_payload, invocation_terminal_metadata, invocation_code,
- invocation_message, links.Ticket.Termination.COMPLETION, None)
- self._invocation_link.accept_ticket(original_invocation_ticket)
-
- self._service_mate.block_until_tickets_satisfy(
- at_least_n_payloads_received_predicate(1))
- service_operation_id = self._service_mate.tickets()[0].operation_id
-
- self._service_mate.block_until_tickets_satisfy(terminated)
- self._assert_is_valid_invocation_sequence(
- self._service_mate.tickets(), _TRANSMISSION_GROUP, _TRANSMISSION_METHOD,
- (invocation_payload,), invocation_initial_metadata,
- invocation_terminal_metadata, links.Ticket.Termination.COMPLETION)
-
- original_service_ticket = links.Ticket(
- service_operation_id, 0, None, None, links.Ticket.Subscription.FULL,
- timeout, 0, service_initial_metadata, service_payload,
- service_terminal_metadata, service_code, service_message,
- links.Ticket.Termination.COMPLETION, None)
- self._service_link.accept_ticket(original_service_ticket)
- self._invocation_mate.block_until_tickets_satisfy(terminated)
- self._assert_is_valid_service_sequence(
- self._invocation_mate.tickets(), (service_payload,),
- service_initial_metadata, service_terminal_metadata, service_code,
- service_message, links.Ticket.Termination.COMPLETION)
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_utilities.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_utilities.py
deleted file mode 100644
index 39c7f2fc63..0000000000
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_utilities.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior appropriate for use in tests."""
-
-import logging
-import threading
-import time
-
-from grpc.framework.interfaces.links import links
-from grpc.framework.interfaces.links import utilities
-
-# A more-or-less arbitrary limit on the length of raw data values to be logged.
-_UNCOMFORTABLY_LONG = 48
-
-
-def _safe_for_log_ticket(ticket):
- """Creates a safe-for-printing-to-the-log ticket for a given ticket.
-
- Args:
- ticket: Any links.Ticket.
-
- Returns:
- A links.Ticket that is as much as can be equal to the given ticket but
- possibly features values like the string "<payload of length 972321>" in
- place of the actual values of the given ticket.
- """
- if isinstance(ticket.payload, (basestring,)):
- payload_length = len(ticket.payload)
- else:
- payload_length = -1
- if payload_length < _UNCOMFORTABLY_LONG:
- return ticket
- else:
- return links.Ticket(
- ticket.operation_id, ticket.sequence_number,
- ticket.group, ticket.method, ticket.subscription, ticket.timeout,
- ticket.allowance, ticket.initial_metadata,
- '<payload of length {}>'.format(payload_length),
- ticket.terminal_metadata, ticket.code, ticket.message,
- ticket.termination, None)
-
-
-class RecordingLink(links.Link):
- """A Link that records every ticket passed to it."""
-
- def __init__(self):
- self._condition = threading.Condition()
- self._tickets = []
-
- def accept_ticket(self, ticket):
- with self._condition:
- self._tickets.append(ticket)
- self._condition.notify_all()
-
- def join_link(self, link):
- pass
-
- def block_until_tickets_satisfy(self, predicate):
- """Blocks until the received tickets satisfy the given predicate.
-
- Args:
- predicate: A callable that takes a sequence of tickets and returns a
- boolean value.
- """
- with self._condition:
- while not predicate(self._tickets):
- self._condition.wait()
-
- def tickets(self):
- """Returns a copy of the list of all tickets received by this Link."""
- with self._condition:
- return tuple(self._tickets)
-
-
-class _Pipe(object):
- """A conduit that logs all tickets passed through it."""
-
- def __init__(self, name):
- self._lock = threading.Lock()
- self._name = name
- self._left_mate = utilities.NULL_LINK
- self._right_mate = utilities.NULL_LINK
-
- def accept_left_to_right_ticket(self, ticket):
- with self._lock:
- logging.warning(
- '%s: moving left to right through %s: %s', time.time(), self._name,
- _safe_for_log_ticket(ticket))
- try:
- self._right_mate.accept_ticket(ticket)
- except Exception as e: # pylint: disable=broad-except
- logging.exception(e)
-
- def accept_right_to_left_ticket(self, ticket):
- with self._lock:
- logging.warning(
- '%s: moving right to left through %s: %s', time.time(), self._name,
- _safe_for_log_ticket(ticket))
- try:
- self._left_mate.accept_ticket(ticket)
- except Exception as e: # pylint: disable=broad-except
- logging.exception(e)
-
- def join_left_mate(self, left_mate):
- with self._lock:
- self._left_mate = utilities.NULL_LINK if left_mate is None else left_mate
-
- def join_right_mate(self, right_mate):
- with self._lock:
- self._right_mate = (
- utilities.NULL_LINK if right_mate is None else right_mate)
-
-
-class _Facade(links.Link):
-
- def __init__(self, accept, join):
- self._accept = accept
- self._join = join
-
- def accept_ticket(self, ticket):
- self._accept(ticket)
-
- def join_link(self, link):
- self._join(link)
-
-
-def logging_links(name):
- """Creates a conduit that logs all tickets passed through it.
-
- Args:
- name: A name to use for the conduit to identify itself in logging output.
-
- Returns:
- Two links.Links, the first of which is the "left" side of the conduit
- and the second of which is the "right" side of the conduit.
- """
- pipe = _Pipe(name)
- left_facade = _Facade(pipe.accept_left_to_right_ticket, pipe.join_left_mate)
- right_facade = _Facade(pipe.accept_right_to_left_ticket, pipe.join_right_mate)
- return left_facade, right_facade
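A sketch of splicing this logging conduit between two links; invocation_link and service_link are hypothetical links.Link objects, not from the diff. Tickets flowing in either direction then pass through the _Pipe and get logged.

    left, right = logging_links('between-ends')
    invocation_link.join_link(left)   # invocation side emits into the left facade
    right.join_link(service_link)     # left-to-right tickets reach the service side
    service_link.join_link(right)     # service side emits into the right facade
    left.join_link(invocation_link)   # right-to-left tickets reach the invocation side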
diff --git a/src/ruby/bin/math_services.rb b/src/ruby/bin/math_services.rb
index 34c36abdda..2b97602b6f 100755
--- a/src/ruby/bin/math_services.rb
+++ b/src/ruby/bin/math_services.rb
@@ -44,15 +44,15 @@ module Math
self.unmarshal_class_method = :decode
self.service_name = 'math.Math'
- # Div divides args.dividend by args.divisor and returns the quotient and
- # remainder.
+ # Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient
+ # and remainder.
rpc :Div, DivArgs, DivReply
# DivMany accepts an arbitrary number of division args from the client stream
# and sends back the results in the reply stream. The stream continues until
# the client closes its end; the server does the same after sending all the
# replies. The stream ends immediately if either end aborts.
rpc :DivMany, stream(DivArgs), stream(DivReply)
- # Fib generates numbers in the Fibonacci sequence. If args.limit > 0, Fib
+ # Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib
# generates up to limit numbers; otherwise it continues until the call is
# canceled. Unlike Fib above, Fib has no final FibReply.
rpc :Fib, FibArgs, stream(Num)
diff --git a/src/ruby/ext/grpc/rb_call.c b/src/ruby/ext/grpc/rb_call.c
index 2126124443..67a42af619 100644
--- a/src/ruby/ext/grpc/rb_call.c
+++ b/src/ruby/ext/grpc/rb_call.c
@@ -38,6 +38,7 @@
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
+#include <grpc/impl/codegen/compression_types.h>
#include "rb_byte_buffer.h"
#include "rb_call_credentials.h"
@@ -910,6 +911,12 @@ static void Init_grpc_op_codes() {
UINT2NUM(GRPC_OP_RECV_CLOSE_ON_SERVER));
}
+static void Init_grpc_metadata_keys() {
+ VALUE grpc_rb_mMetadataKeys = rb_define_module_under(grpc_rb_mGrpcCore, "MetadataKeys");
+ rb_define_const(grpc_rb_mMetadataKeys, "COMPRESSION_REQUEST_ALGORITHM",
+ rb_str_new2(GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY));
+}
+
void Init_grpc_call() {
/* CallError inherits from Exception to signal that it is non-recoverable */
grpc_rb_eCallError =
@@ -972,6 +979,7 @@ void Init_grpc_call() {
Init_grpc_error_codes();
Init_grpc_op_codes();
Init_grpc_write_flags();
+ Init_grpc_metadata_keys();
}
/* Gets the call from the ruby object */
diff --git a/src/ruby/ext/grpc/rb_compression_options.c b/src/ruby/ext/grpc/rb_compression_options.c
new file mode 100644
index 0000000000..0a3a215b1c
--- /dev/null
+++ b/src/ruby/ext/grpc/rb_compression_options.c
@@ -0,0 +1,464 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <ruby/ruby.h>
+
+#include "rb_compression_options.h"
+#include "rb_grpc_imports.generated.h"
+
+#include <grpc/compression.h>
+#include <grpc/grpc.h>
+#include <grpc/impl/codegen/alloc.h>
+#include <grpc/impl/codegen/compression_types.h>
+#include <grpc/impl/codegen/grpc_types.h>
+#include <string.h>
+
+#include "rb_grpc.h"
+
+static VALUE grpc_rb_cCompressionOptions = Qnil;
+
+/* Ruby Ids for the names of valid compression levels. */
+static VALUE id_compress_level_none = Qnil;
+static VALUE id_compress_level_low = Qnil;
+static VALUE id_compress_level_medium = Qnil;
+static VALUE id_compress_level_high = Qnil;
+
+/* grpc_rb_compression_options wraps a grpc_compression_options.
+ * It can be used to get the channel argument key-values for specific
+ * compression settings. */
+
+/* Note that Ruby objects of this type don't hold references to other
+ * Ruby objects, so they don't need a mark function for GC. */
+typedef struct grpc_rb_compression_options {
+  /* The actual compression options struct that is being wrapped. */
+ grpc_compression_options *wrapped;
+} grpc_rb_compression_options;
+
+/* Destroys a compression options instance and frees the
+ * wrapped grpc compression options. */
+static void grpc_rb_compression_options_free(void *p) {
+ grpc_rb_compression_options *wrapper = NULL;
+ if (p == NULL) {
+ return;
+  }
+ wrapper = (grpc_rb_compression_options *)p;
+
+ if (wrapper->wrapped != NULL) {
+ gpr_free(wrapper->wrapped);
+ wrapper->wrapped = NULL;
+ }
+
+ xfree(p);
+}
+
+/* Ruby recognized data type for the CompressionOptions class. */
+static rb_data_type_t grpc_rb_compression_options_data_type = {
+ "grpc_compression_options",
+ {NULL,
+ grpc_rb_compression_options_free,
+ GRPC_RB_MEMSIZE_UNAVAILABLE,
+ {NULL, NULL}},
+ NULL,
+ NULL,
+#ifdef RUBY_TYPED_FREE_IMMEDIATELY
+ RUBY_TYPED_FREE_IMMEDIATELY
+#endif
+};
+
+/* Allocates a CompressionOptions instance.
+   The wrapped grpc compression options struct is also
+   allocated and initialized here. */
+static VALUE grpc_rb_compression_options_alloc(VALUE cls) {
+ grpc_rb_compression_options *wrapper =
+ gpr_malloc(sizeof(grpc_rb_compression_options));
+ wrapper->wrapped = NULL;
+ wrapper->wrapped = gpr_malloc(sizeof(grpc_compression_options));
+ grpc_compression_options_init(wrapper->wrapped);
+
+ return TypedData_Wrap_Struct(cls, &grpc_rb_compression_options_data_type,
+ wrapper);
+}
+
+/* Disables a compression algorithm, given its GRPC core internal enum
+ * value. */
+VALUE grpc_rb_compression_options_disable_compression_algorithm_internal(
+ VALUE self, VALUE algorithm_to_disable) {
+ grpc_compression_algorithm compression_algorithm = 0;
+ grpc_rb_compression_options *wrapper = NULL;
+
+ TypedData_Get_Struct(self, grpc_rb_compression_options,
+ &grpc_rb_compression_options_data_type, wrapper);
+ compression_algorithm =
+ (grpc_compression_algorithm)NUM2INT(algorithm_to_disable);
+
+ grpc_compression_options_disable_algorithm(wrapper->wrapped,
+ compression_algorithm);
+
+ return Qnil;
+}
+
+/* Gets the internal enum value of a compression level, given its readable
+ * name. */
+grpc_compression_level grpc_rb_compression_options_level_name_to_value_internal(
+ VALUE level_name) {
+ Check_Type(level_name, T_SYMBOL);
+
+  /* Check the level name passed in and map it to the matching compression
+   * level macro from the GRPC core headers. */
+ if (id_compress_level_none == SYM2ID(level_name)) {
+ return GRPC_COMPRESS_LEVEL_NONE;
+ } else if (id_compress_level_low == SYM2ID(level_name)) {
+ return GRPC_COMPRESS_LEVEL_LOW;
+ } else if (id_compress_level_medium == SYM2ID(level_name)) {
+ return GRPC_COMPRESS_LEVEL_MED;
+ } else if (id_compress_level_high == SYM2ID(level_name)) {
+ return GRPC_COMPRESS_LEVEL_HIGH;
+ }
+
+  rb_raise(rb_eArgError,
+           "Unrecognized compression level name. "
+           "Valid compression level names are none, low, medium, and high.");
+
+ /* Dummy return statement. */
+ return GRPC_COMPRESS_LEVEL_NONE;
+}
+
+/* Sets the default compression level, given the name of a compression level.
+ * Raises an error if the level name is not recognized. */
+void grpc_rb_compression_options_set_default_level(
+ grpc_compression_options *options, VALUE new_level_name) {
+ options->default_level.level =
+ grpc_rb_compression_options_level_name_to_value_internal(new_level_name);
+ options->default_level.is_set = 1;
+}
+
+/* Gets the internal value of a compression algorithm suitable as the value
+ * in a GRPC core channel arguments hash.
+ * algorithm_value is an out parameter.
+ * Raises an error if the name of the algorithm passed in is invalid. */
+void grpc_rb_compression_options_algorithm_name_to_value_internal(
+ grpc_compression_algorithm *algorithm_value, VALUE algorithm_name) {
+ char *name_str = NULL;
+ long name_len = 0;
+ VALUE algorithm_name_as_string = Qnil;
+
+ Check_Type(algorithm_name, T_SYMBOL);
+
+ /* Convert the algorithm symbol to a ruby string, so that we can get the
+ * correct C string out of it. */
+ algorithm_name_as_string = rb_funcall(algorithm_name, rb_intern("to_s"), 0);
+
+ name_str = RSTRING_PTR(algorithm_name_as_string);
+ name_len = RSTRING_LEN(algorithm_name_as_string);
+
+  /* Raise an error if the name isn't recognized as a compression algorithm
+   * by the parse function in GRPC core. */
+ if (!grpc_compression_algorithm_parse(name_str, name_len, algorithm_value)) {
+ rb_raise(rb_eNameError, "Invalid compression algorithm name: %s",
+ StringValueCStr(algorithm_name_as_string));
+ }
+}
+
+/* Indicates whether a given algorithm is enabled on this instance, given the
+ * readable algorithm name. */
+VALUE grpc_rb_compression_options_is_algorithm_enabled(VALUE self,
+ VALUE algorithm_name) {
+ grpc_rb_compression_options *wrapper = NULL;
+ grpc_compression_algorithm internal_algorithm_value;
+
+ TypedData_Get_Struct(self, grpc_rb_compression_options,
+ &grpc_rb_compression_options_data_type, wrapper);
+ grpc_rb_compression_options_algorithm_name_to_value_internal(
+ &internal_algorithm_value, algorithm_name);
+
+ if (grpc_compression_options_is_algorithm_enabled(wrapper->wrapped,
+ internal_algorithm_value)) {
+ return Qtrue;
+ }
+ return Qfalse;
+}
+
+/* Sets the default algorithm to the name of the algorithm passed in.
+ * Raises an error if the name is not a valid compression algorithm name. */
+void grpc_rb_compression_options_set_default_algorithm(
+ grpc_compression_options *options, VALUE algorithm_name) {
+ grpc_rb_compression_options_algorithm_name_to_value_internal(
+ &options->default_algorithm.algorithm, algorithm_name);
+ options->default_algorithm.is_set = 1;
+}
+
+/* Disables an algorithm on the current instance, given the name of an
+ * algorithm.
+ * Fails if the algorithm name is invalid. */
+void grpc_rb_compression_options_disable_algorithm(
+ grpc_compression_options *compression_options, VALUE algorithm_name) {
+ grpc_compression_algorithm internal_algorithm_value;
+
+ grpc_rb_compression_options_algorithm_name_to_value_internal(
+ &internal_algorithm_value, algorithm_name);
+ grpc_compression_options_disable_algorithm(compression_options,
+ internal_algorithm_value);
+}
+
+/* Provides a ruby hash of GRPC core channel argument key-values that
+ * correspond to the compression settings on this instance. */
+VALUE grpc_rb_compression_options_to_hash(VALUE self) {
+ grpc_rb_compression_options *wrapper = NULL;
+ grpc_compression_options *compression_options = NULL;
+ VALUE channel_arg_hash = rb_hash_new();
+ VALUE key = Qnil;
+ VALUE value = Qnil;
+
+ TypedData_Get_Struct(self, grpc_rb_compression_options,
+ &grpc_rb_compression_options_data_type, wrapper);
+ compression_options = wrapper->wrapped;
+
+ /* Add key-value pairs to the new Ruby hash. It can be used
+ * as GRPC core channel arguments. */
+ if (compression_options->default_level.is_set) {
+ key = rb_str_new2(GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL);
+ value = INT2NUM((int)compression_options->default_level.level);
+ rb_hash_aset(channel_arg_hash, key, value);
+ }
+
+ if (compression_options->default_algorithm.is_set) {
+ key = rb_str_new2(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM);
+ value = INT2NUM((int)compression_options->default_algorithm.algorithm);
+ rb_hash_aset(channel_arg_hash, key, value);
+ }
+
+ key = rb_str_new2(GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET);
+ value = INT2NUM((int)compression_options->enabled_algorithms_bitset);
+ rb_hash_aset(channel_arg_hash, key, value);
+
+ return channel_arg_hash;
+}
+
+/* Converts an internal enum level value to a readable level name.
+ * Fails if the level value is invalid. */
+VALUE grpc_rb_compression_options_level_value_to_name_internal(
+ grpc_compression_level compression_value) {
+ switch (compression_value) {
+ case GRPC_COMPRESS_LEVEL_NONE:
+ return ID2SYM(id_compress_level_none);
+ case GRPC_COMPRESS_LEVEL_LOW:
+ return ID2SYM(id_compress_level_low);
+ case GRPC_COMPRESS_LEVEL_MED:
+ return ID2SYM(id_compress_level_medium);
+ case GRPC_COMPRESS_LEVEL_HIGH:
+ return ID2SYM(id_compress_level_high);
+ default:
+ rb_raise(
+ rb_eArgError,
+ "Failed to convert compression level value to name for value: %d",
+ (int)compression_value);
+ }
+}
+
+/* Converts an algorithm internal enum value to a readable name.
+ * Fails if the enum value is invalid. */
+VALUE grpc_rb_compression_options_algorithm_value_to_name_internal(
+ grpc_compression_algorithm internal_value) {
+ char *algorithm_name = NULL;
+
+ if (!grpc_compression_algorithm_name(internal_value, &algorithm_name)) {
+ rb_raise(rb_eArgError, "Failed to convert algorithm value to name");
+ }
+
+ return ID2SYM(rb_intern(algorithm_name));
+}
+
+/* Gets the readable name of the default algorithm if one has been set.
+ * Returns nil if no algorithm has been set. */
+VALUE grpc_rb_compression_options_get_default_algorithm(VALUE self) {
+ grpc_compression_algorithm internal_value;
+ grpc_rb_compression_options *wrapper = NULL;
+
+ TypedData_Get_Struct(self, grpc_rb_compression_options,
+ &grpc_rb_compression_options_data_type, wrapper);
+
+ if (wrapper->wrapped->default_algorithm.is_set) {
+ internal_value = wrapper->wrapped->default_algorithm.algorithm;
+ return grpc_rb_compression_options_algorithm_value_to_name_internal(
+ internal_value);
+ }
+
+ return Qnil;
+}
+
+/* Gets the readable name of the default compression level if one has been
+ * set.
+ * A nil return value means that it hasn't been set. */
+VALUE grpc_rb_compression_options_get_default_level(VALUE self) {
+ grpc_compression_level internal_value;
+ grpc_rb_compression_options *wrapper = NULL;
+
+ TypedData_Get_Struct(self, grpc_rb_compression_options,
+ &grpc_rb_compression_options_data_type, wrapper);
+
+ if (wrapper->wrapped->default_level.is_set) {
+ internal_value = wrapper->wrapped->default_level.level;
+ return grpc_rb_compression_options_level_value_to_name_internal(
+ internal_value);
+ }
+
+ return Qnil;
+}
+
+/* Gets a list of the disabled algorithms as readable names.
+ * Returns an empty list if no algorithms have been disabled. */
+VALUE grpc_rb_compression_options_get_disabled_algorithms(VALUE self) {
+ VALUE disabled_algorithms = rb_ary_new();
+ grpc_compression_algorithm internal_value;
+ grpc_rb_compression_options *wrapper = NULL;
+
+ TypedData_Get_Struct(self, grpc_rb_compression_options,
+ &grpc_rb_compression_options_data_type, wrapper);
+
+ for (internal_value = GRPC_COMPRESS_NONE;
+ internal_value < GRPC_COMPRESS_ALGORITHMS_COUNT; internal_value++) {
+ if (!grpc_compression_options_is_algorithm_enabled(wrapper->wrapped,
+ internal_value)) {
+ rb_ary_push(disabled_algorithms,
+ grpc_rb_compression_options_algorithm_value_to_name_internal(
+ internal_value));
+ }
+ }
+ return disabled_algorithms;
+}
+
+/* Initializes the compression options wrapper.
+ * Takes an optional hash parameter.
+ *
+ * Example call-seq:
+ * options = CompressionOptions.new(
+ * default_level: :none,
+ * disabled_algorithms: [:gzip]
+ * )
+ *   channel_arg_hash = { ... }
+ *   channel_arg_hash_with_compression_options = channel_arg_hash.merge(options.to_hash)
+ */
+VALUE grpc_rb_compression_options_init(int argc, VALUE *argv, VALUE self) {
+ grpc_rb_compression_options *wrapper = NULL;
+ VALUE default_algorithm = Qnil;
+ VALUE default_level = Qnil;
+ VALUE disabled_algorithms = Qnil;
+ VALUE algorithm_name = Qnil;
+ VALUE hash_arg = Qnil;
+
+ rb_scan_args(argc, argv, "01", &hash_arg);
+
+ /* Check if the hash parameter was passed, or if invalid arguments were
+ * passed. */
+ if (hash_arg == Qnil) {
+ return self;
+ } else if (TYPE(hash_arg) != T_HASH || argc > 1) {
+ rb_raise(rb_eArgError,
+ "Invalid arguments. Expecting optional hash parameter");
+ }
+
+ TypedData_Get_Struct(self, grpc_rb_compression_options,
+ &grpc_rb_compression_options_data_type, wrapper);
+
+ /* Set the default algorithm if one was chosen. */
+ default_algorithm =
+ rb_hash_aref(hash_arg, ID2SYM(rb_intern("default_algorithm")));
+ if (default_algorithm != Qnil) {
+ grpc_rb_compression_options_set_default_algorithm(wrapper->wrapped,
+ default_algorithm);
+ }
+
+ /* Set the default level if one was chosen. */
+ default_level = rb_hash_aref(hash_arg, ID2SYM(rb_intern("default_level")));
+ if (default_level != Qnil) {
+ grpc_rb_compression_options_set_default_level(wrapper->wrapped,
+ default_level);
+ }
+
+ /* Set the disabled algorithms if any were chosen. */
+ disabled_algorithms =
+ rb_hash_aref(hash_arg, ID2SYM(rb_intern("disabled_algorithms")));
+ if (disabled_algorithms != Qnil) {
+ Check_Type(disabled_algorithms, T_ARRAY);
+
+ for (int i = 0; i < RARRAY_LEN(disabled_algorithms); i++) {
+ algorithm_name = rb_ary_entry(disabled_algorithms, i);
+ grpc_rb_compression_options_disable_algorithm(wrapper->wrapped,
+ algorithm_name);
+ }
+ }
+
+ return self;
+}
+
+void Init_grpc_compression_options() {
+ grpc_rb_cCompressionOptions = rb_define_class_under(
+ grpc_rb_mGrpcCore, "CompressionOptions", rb_cObject);
+
+ /* Allocates an object managed by the ruby runtime. */
+ rb_define_alloc_func(grpc_rb_cCompressionOptions,
+ grpc_rb_compression_options_alloc);
+
+ /* Initializes the ruby wrapper. #new method takes an optional hash argument.
+ */
+ rb_define_method(grpc_rb_cCompressionOptions, "initialize",
+ grpc_rb_compression_options_init, -1);
+
+ /* Methods for getting the default algorithm, default level, and disabled
+ * algorithms as readable names. */
+ rb_define_method(grpc_rb_cCompressionOptions, "default_algorithm",
+ grpc_rb_compression_options_get_default_algorithm, 0);
+ rb_define_method(grpc_rb_cCompressionOptions, "default_level",
+ grpc_rb_compression_options_get_default_level, 0);
+ rb_define_method(grpc_rb_cCompressionOptions, "disabled_algorithms",
+ grpc_rb_compression_options_get_disabled_algorithms, 0);
+
+ /* Determines whether or not an algorithm is enabled, given a readable
+   * algorithm name. */
+ rb_define_method(grpc_rb_cCompressionOptions, "algorithm_enabled?",
+ grpc_rb_compression_options_is_algorithm_enabled, 1);
+
+ /* Provides a hash of the compression settings suitable
+ * for passing to server or channel args. */
+ rb_define_method(grpc_rb_cCompressionOptions, "to_hash",
+ grpc_rb_compression_options_to_hash, 0);
+ rb_define_alias(grpc_rb_cCompressionOptions, "to_channel_arg_hash",
+ "to_hash");
+
+ /* Ruby ids for the names of the different compression levels. */
+ id_compress_level_none = rb_intern("none");
+ id_compress_level_low = rb_intern("low");
+ id_compress_level_medium = rb_intern("medium");
+ id_compress_level_high = rb_intern("high");
+}
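Putting the new class together, here is a minimal usage sketch based on the #initialize call-seq above and the interop client further down (the address is a placeholder and the stub class comes from the generated test services):

    options = GRPC::Core::CompressionOptions.new(
      default_algorithm: :gzip,
      default_level: :low,
      disabled_algorithms: [:deflate]
    )

    options.default_algorithm             # => :gzip
    options.algorithm_enabled?(:deflate)  # => false
    options.disabled_algorithms           # => [:deflate]

    # The hash form is what actually gets handed to the core as channel args.
    stub = Grpc::Testing::TestService::Stub.new(
      'localhost:50051', :this_channel_is_insecure,
      channel_args: options.to_channel_arg_hash
    )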
diff --git a/src/ruby/ext/grpc/rb_compression_options.h b/src/ruby/ext/grpc/rb_compression_options.h
new file mode 100644
index 0000000000..4d5a924786
--- /dev/null
+++ b/src/ruby/ext/grpc/rb_compression_options.h
@@ -0,0 +1,44 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_RB_COMPRESSION_OPTIONS_H_
+#define GRPC_RB_COMPRESSION_OPTIONS_H_
+
+#include <ruby/ruby.h>
+
+#include <grpc/grpc.h>
+
+/* Initializes the compression options ruby wrapper. */
+void Init_grpc_compression_options();
+
+#endif /* GRPC_RB_COMPRESSION_OPTIONS_H_ */
diff --git a/src/ruby/ext/grpc/rb_grpc.c b/src/ruby/ext/grpc/rb_grpc.c
index 188a62475d..17cd165a91 100644
--- a/src/ruby/ext/grpc/rb_grpc.c
+++ b/src/ruby/ext/grpc/rb_grpc.c
@@ -49,6 +49,7 @@
#include "rb_loader.h"
#include "rb_server.h"
#include "rb_server_credentials.h"
+#include "rb_compression_options.h"
static VALUE grpc_rb_cTimeVal = Qnil;
@@ -220,7 +221,7 @@ static VALUE grpc_rb_time_val_to_time(VALUE self) {
time_const);
real_time = gpr_convert_clock_type(*time_const, GPR_CLOCK_REALTIME);
return rb_funcall(rb_cTime, id_at, 2, INT2NUM(real_time.tv_sec),
- INT2NUM(real_time.tv_nsec));
+ INT2NUM(real_time.tv_nsec / 1000));
}
/* Invokes inspect on the ctime version of the time val. */
@@ -332,4 +333,5 @@ void Init_grpc_c() {
Init_grpc_server_credentials();
Init_grpc_status_codes();
Init_grpc_time_consts();
+ Init_grpc_compression_options();
}
diff --git a/src/ruby/ext/grpc/rb_server.c b/src/ruby/ext/grpc/rb_server.c
index bf26841fd2..2a6a246e67 100644
--- a/src/ruby/ext/grpc/rb_server.c
+++ b/src/ruby/ext/grpc/rb_server.c
@@ -218,7 +218,7 @@ static VALUE grpc_rb_server_request_call(VALUE self) {
grpc_rb_sNewServerRpc, rb_str_new2(st.details.method),
rb_str_new2(st.details.host),
rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec),
- INT2NUM(deadline.tv_nsec)),
+ INT2NUM(deadline.tv_nsec / 1000)),
grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call, call_queue),
NULL);
grpc_request_call_stack_cleanup(&st);
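The two tv_nsec / 1000 changes above (here and in rb_grpc.c) are needed because Ruby's Time.at takes its second argument in microseconds, while gpr_timespec carries nanoseconds; passing tv_nsec straight through made the sub-second part a thousand times too large. A small plain-Ruby illustration:

    nsec = 250_000_000                 # 0.25s expressed in nanoseconds
    Time.at(10, nsec / 1000).usec      # => 250000, i.e. 10.25s as intended
    # Passing nsec directly would be read as 250,000,000 microseconds,
    # i.e. 250 whole seconds, shifting the resulting Time far off target.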
diff --git a/src/ruby/lib/grpc/version.rb b/src/ruby/lib/grpc/version.rb
index 5e6aaef2eb..6e62af94d4 100644
--- a/src/ruby/lib/grpc/version.rb
+++ b/src/ruby/lib/grpc/version.rb
@@ -29,5 +29,5 @@
# GRPC contains the General RPC module.
module GRPC
- VERSION = '0.16.0.dev'
+ VERSION = '1.1.0.dev'
end
diff --git a/src/ruby/pb/src/proto/grpc/testing/messages.rb b/src/ruby/pb/src/proto/grpc/testing/messages.rb
index 2bdfe0eade..e27ccd0dc0 100644
--- a/src/ruby/pb/src/proto/grpc/testing/messages.rb
+++ b/src/ruby/pb/src/proto/grpc/testing/messages.rb
@@ -4,6 +4,9 @@
require 'google/protobuf'
Google::Protobuf::DescriptorPool.generated_pool.build do
+ add_message "grpc.testing.BoolValue" do
+ optional :value, :bool, 1
+ end
add_message "grpc.testing.Payload" do
optional :type, :enum, 1, "grpc.testing.PayloadType"
optional :body, :bytes, 2
@@ -18,8 +21,9 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
optional :payload, :message, 3, "grpc.testing.Payload"
optional :fill_username, :bool, 4
optional :fill_oauth_scope, :bool, 5
- optional :response_compression, :enum, 6, "grpc.testing.CompressionType"
+ optional :response_compressed, :message, 6, "grpc.testing.BoolValue"
optional :response_status, :message, 7, "grpc.testing.EchoStatus"
+ optional :expect_compressed, :message, 8, "grpc.testing.BoolValue"
end
add_message "grpc.testing.SimpleResponse" do
optional :payload, :message, 1, "grpc.testing.Payload"
@@ -28,6 +32,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
end
add_message "grpc.testing.StreamingInputCallRequest" do
optional :payload, :message, 1, "grpc.testing.Payload"
+ optional :expect_compressed, :message, 2, "grpc.testing.BoolValue"
end
add_message "grpc.testing.StreamingInputCallResponse" do
optional :aggregated_payload_size, :int32, 1
@@ -35,12 +40,12 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
add_message "grpc.testing.ResponseParameters" do
optional :size, :int32, 1
optional :interval_us, :int32, 2
+ optional :compressed, :message, 3, "grpc.testing.BoolValue"
end
add_message "grpc.testing.StreamingOutputCallRequest" do
optional :response_type, :enum, 1, "grpc.testing.PayloadType"
repeated :response_parameters, :message, 2, "grpc.testing.ResponseParameters"
optional :payload, :message, 3, "grpc.testing.Payload"
- optional :response_compression, :enum, 6, "grpc.testing.CompressionType"
optional :response_status, :message, 7, "grpc.testing.EchoStatus"
end
add_message "grpc.testing.StreamingOutputCallResponse" do
@@ -55,18 +60,12 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
end
add_enum "grpc.testing.PayloadType" do
value :COMPRESSABLE, 0
- value :UNCOMPRESSABLE, 1
- value :RANDOM, 2
- end
- add_enum "grpc.testing.CompressionType" do
- value :NONE, 0
- value :GZIP, 1
- value :DEFLATE, 2
end
end
module Grpc
module Testing
+ BoolValue = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.BoolValue").msgclass
Payload = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.Payload").msgclass
EchoStatus = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.EchoStatus").msgclass
SimpleRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.SimpleRequest").msgclass
@@ -79,6 +78,5 @@ module Grpc
ReconnectParams = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ReconnectParams").msgclass
ReconnectInfo = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ReconnectInfo").msgclass
PayloadType = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.PayloadType").enummodule
- CompressionType = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.CompressionType").enummodule
end
end
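With the CompressionType enum gone, compression expectations ride on the new grpc.testing.BoolValue wrapper fields instead. A request that tells the server to expect a compressed payload now looks roughly like this (sizes mirror the interop client below; the body is just a run of NUL bytes):

    req = Grpc::Testing::SimpleRequest.new(
      response_type: :COMPRESSABLE,
      response_size: 314_159,
      payload: Grpc::Testing::Payload.new(type: :COMPRESSABLE, body: "\0" * 271_828),
      expect_compressed: Grpc::Testing::BoolValue.new(value: true)
    )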
diff --git a/src/ruby/pb/test/client.rb b/src/ruby/pb/test/client.rb
index 066a7bb90f..4c6d441dcb 100755
--- a/src/ruby/pb/test/client.rb
+++ b/src/ruby/pb/test/client.rb
@@ -52,9 +52,9 @@ require_relative '../../lib/grpc'
require 'googleauth'
require 'google/protobuf'
-require_relative 'proto/empty'
-require_relative 'proto/messages'
-require_relative 'proto/test_services'
+require_relative '../src/proto/grpc/testing/empty'
+require_relative '../src/proto/grpc/testing/messages'
+require_relative '../src/proto/grpc/testing/test_services'
AUTH_ENV = Google::Auth::CredentialsLoader::ENV_VAR
@@ -111,6 +111,18 @@ end
# creates a test stub that accesses host:port securely.
def create_stub(opts)
address = "#{opts.host}:#{opts.port}"
+
+ # Provide channel args that request compression by default
+ # for compression interop tests
+ if ['client_compressed_unary',
+ 'client_compressed_streaming'].include?(opts.test_case)
+ compression_options =
+ GRPC::Core::CompressionOptions.new(default_algorithm: :gzip)
+ compression_channel_args = compression_options.to_channel_arg_hash
+ else
+ compression_channel_args = {}
+ end
+
if opts.secure
creds = ssl_creds(opts.use_test_ca)
stub_opts = {
@@ -145,10 +157,15 @@ def create_stub(opts)
end
GRPC.logger.info("... connecting securely to #{address}")
+ stub_opts[:channel_args].merge!(compression_channel_args)
Grpc::Testing::TestService::Stub.new(address, creds, **stub_opts)
else
GRPC.logger.info("... connecting insecurely to #{address}")
- Grpc::Testing::TestService::Stub.new(address, :this_channel_is_insecure)
+ Grpc::Testing::TestService::Stub.new(
+ address,
+ :this_channel_is_insecure,
+ channel_args: compression_channel_args
+ )
end
end
@@ -216,10 +233,28 @@ class BlockingEnumerator
end
end
+# Intended to wrap a call_op and adjust the call_op's write flag
+# in between the messages that it yields.
+class WriteFlagSettingStreamingInputEnumerable
+ attr_accessor :call_op
+
+ def initialize(requests_and_write_flags)
+ @requests_and_write_flags = requests_and_write_flags
+ end
+
+ def each
+ @requests_and_write_flags.each do |request_and_flag|
+ @call_op.write_flag = request_and_flag[:write_flag]
+ yield request_and_flag[:request]
+ end
+ end
+end
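The enumerable needs a reference back to the call operation so it can flip the write flag just before each message is written; the wiring looks like this sketch, matching client_compressed_streaming below (req_a, req_b and stub are placeholders):

    requests = WriteFlagSettingStreamingInputEnumerable.new([
      { request: req_a, write_flag: 0 },
      { request: req_b, write_flag: GRPC::Core::WriteFlags::NO_COMPRESS }
    ])
    op = stub.streaming_input_call(requests, return_op: true)
    requests.call_op = op   # lets the enumerable set the flag per message
    resp = op.execute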
+
# defines methods corresponding to each interop test case.
class NamedTests
include Grpc::Testing
include Grpc::Testing::PayloadType
+ include GRPC::Core::MetadataKeys
def initialize(stub, args)
@stub = stub
@@ -235,6 +270,48 @@ class NamedTests
perform_large_unary
end
+ def client_compressed_unary
+ # first request used also for the probe
+ req_size, wanted_response_size = 271_828, 314_159
+ expect_compressed = BoolValue.new(value: true)
+ payload = Payload.new(type: :COMPRESSABLE, body: nulls(req_size))
+ req = SimpleRequest.new(response_type: :COMPRESSABLE,
+ response_size: wanted_response_size,
+ payload: payload,
+ expect_compressed: expect_compressed)
+
+    # send a probe to see if compressed requests are supported by the server
+ send_probe_for_compressed_request_support do
+ request_uncompressed_args = {
+ COMPRESSION_REQUEST_ALGORITHM => 'identity'
+ }
+ @stub.unary_call(req, metadata: request_uncompressed_args)
+ end
+
+ # make a call with a compressed message
+ resp = @stub.unary_call(req)
+ assert('Expected second unary call with compression to work') do
+ resp.payload.body.length == wanted_response_size
+ end
+
+ # make a call with an uncompressed message
+ stub_options = {
+ COMPRESSION_REQUEST_ALGORITHM => 'identity'
+ }
+
+ req = SimpleRequest.new(
+ response_type: :COMPRESSABLE,
+ response_size: wanted_response_size,
+ payload: payload,
+ expect_compressed: BoolValue.new(value: false)
+ )
+
+ resp = @stub.unary_call(req, metadata: stub_options)
+    assert('Expected third unary call without compression to work') do
+ resp.payload.body.length == wanted_response_size
+ end
+ end
+
def service_account_creds
# ignore this test if the oauth options are not set
if @args.oauth_scope.nil?
@@ -309,6 +386,50 @@ class NamedTests
end
end
+ def client_compressed_streaming
+ # first request used also by the probe
+ first_request = StreamingInputCallRequest.new(
+ payload: Payload.new(type: :COMPRESSABLE, body: nulls(27_182)),
+ expect_compressed: BoolValue.new(value: true)
+ )
+
+    # send a probe to see if compressed requests are supported by the server
+ send_probe_for_compressed_request_support do
+ request_uncompressed_args = {
+ COMPRESSION_REQUEST_ALGORITHM => 'identity'
+ }
+ @stub.streaming_input_call([first_request],
+ metadata: request_uncompressed_args)
+ end
+
+ second_request = StreamingInputCallRequest.new(
+ payload: Payload.new(type: :COMPRESSABLE, body: nulls(45_904)),
+ expect_compressed: BoolValue.new(value: false)
+ )
+
+    # Create the request messages and the corresponding write flag
+    # for each message
+ requests = WriteFlagSettingStreamingInputEnumerable.new([
+ { request: first_request,
+ write_flag: 0 },
+ { request: second_request,
+ write_flag: GRPC::Core::WriteFlags::NO_COMPRESS }
+ ])
+
+ # Create the call_op, pass it to the requests enumerable, and
+ # run the call
+ call_op = @stub.streaming_input_call(requests,
+ return_op: true)
+ requests.call_op = call_op
+ resp = call_op.execute
+
+ wanted_aggregate_size = 73_086
+
+ assert("#{__callee__}: aggregate payload size is incorrect") do
+ wanted_aggregate_size == resp.aggregated_payload_size
+ end
+ end
+
def server_streaming
msg_sizes = [31_415, 9, 2653, 58_979]
response_spec = msg_sizes.map { |s| ResponseParameters.new(size: s) }
@@ -415,6 +536,29 @@ class NamedTests
end
resp
end
+
+  # Sends a probe message to see whether compressed requests are
+  # implemented on the server.
+ def send_probe_for_compressed_request_support(&send_probe)
+    bad_status_occurred = false
+
+ begin
+ send_probe.call
+ rescue GRPC::BadStatus => e
+ if e.code == GRPC::Core::StatusCodes::INVALID_ARGUMENT
+        bad_status_occurred = true
+ else
+ fail AssertionError, "Bad status received but code is #{e.code}"
+ end
+ rescue Exception => e
+ fail AssertionError, "Expected BadStatus. Received: #{e.inspect}"
+ end
+
+ assert('CompressedRequest probe failed') do
+      bad_status_occurred
+ end
+ end
+
end
# Args is used to hold the command line info.
diff --git a/src/ruby/pb/test/proto/empty.rb b/src/ruby/pb/test/proto/empty.rb
deleted file mode 100644
index 559adcc85e..0000000000
--- a/src/ruby/pb/test/proto/empty.rb
+++ /dev/null
@@ -1,15 +0,0 @@
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: test/proto/empty.proto
-
-require 'google/protobuf'
-
-Google::Protobuf::DescriptorPool.generated_pool.build do
- add_message "grpc.testing.Empty" do
- end
-end
-
-module Grpc
- module Testing
- Empty = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.Empty").msgclass
- end
-end
diff --git a/src/ruby/pb/test/proto/messages.rb b/src/ruby/pb/test/proto/messages.rb
deleted file mode 100644
index 5222c9824a..0000000000
--- a/src/ruby/pb/test/proto/messages.rb
+++ /dev/null
@@ -1,80 +0,0 @@
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: test/proto/messages.proto
-
-require 'google/protobuf'
-
-Google::Protobuf::DescriptorPool.generated_pool.build do
- add_message "grpc.testing.Payload" do
- optional :type, :enum, 1, "grpc.testing.PayloadType"
- optional :body, :bytes, 2
- end
- add_message "grpc.testing.EchoStatus" do
- optional :code, :int32, 1
- optional :message, :string, 2
- end
- add_message "grpc.testing.SimpleRequest" do
- optional :response_type, :enum, 1, "grpc.testing.PayloadType"
- optional :response_size, :int32, 2
- optional :payload, :message, 3, "grpc.testing.Payload"
- optional :fill_username, :bool, 4
- optional :fill_oauth_scope, :bool, 5
- optional :response_compression, :enum, 6, "grpc.testing.CompressionType"
- optional :response_status, :message, 7, "grpc.testing.EchoStatus"
- end
- add_message "grpc.testing.SimpleResponse" do
- optional :payload, :message, 1, "grpc.testing.Payload"
- optional :username, :string, 2
- optional :oauth_scope, :string, 3
- end
- add_message "grpc.testing.StreamingInputCallRequest" do
- optional :payload, :message, 1, "grpc.testing.Payload"
- end
- add_message "grpc.testing.StreamingInputCallResponse" do
- optional :aggregated_payload_size, :int32, 1
- end
- add_message "grpc.testing.ResponseParameters" do
- optional :size, :int32, 1
- optional :interval_us, :int32, 2
- end
- add_message "grpc.testing.StreamingOutputCallRequest" do
- optional :response_type, :enum, 1, "grpc.testing.PayloadType"
- repeated :response_parameters, :message, 2, "grpc.testing.ResponseParameters"
- optional :payload, :message, 3, "grpc.testing.Payload"
- optional :response_compression, :enum, 6, "grpc.testing.CompressionType"
- optional :response_status, :message, 7, "grpc.testing.EchoStatus"
- end
- add_message "grpc.testing.StreamingOutputCallResponse" do
- optional :payload, :message, 1, "grpc.testing.Payload"
- end
- add_message "grpc.testing.ReconnectInfo" do
- optional :passed, :bool, 1
- repeated :backoff_ms, :int32, 2
- end
- add_enum "grpc.testing.PayloadType" do
- value :COMPRESSABLE, 0
- value :UNCOMPRESSABLE, 1
- value :RANDOM, 2
- end
- add_enum "grpc.testing.CompressionType" do
- value :NONE, 0
- value :GZIP, 1
- value :DEFLATE, 2
- end
-end
-
-module Grpc
- module Testing
- Payload = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.Payload").msgclass
- EchoStatus = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.EchoStatus").msgclass
- SimpleRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.SimpleRequest").msgclass
- SimpleResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.SimpleResponse").msgclass
- StreamingInputCallRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.StreamingInputCallRequest").msgclass
- StreamingInputCallResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.StreamingInputCallResponse").msgclass
- ResponseParameters = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ResponseParameters").msgclass
- StreamingOutputCallRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.StreamingOutputCallRequest").msgclass
- StreamingOutputCallResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.StreamingOutputCallResponse").msgclass
- ReconnectInfo = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ReconnectInfo").msgclass
- PayloadType = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.PayloadType").enummodule
- CompressionType = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.CompressionType").enummodule
- end
-end
diff --git a/src/ruby/pb/test/proto/test.rb b/src/ruby/pb/test/proto/test.rb
deleted file mode 100644
index 100eb6505c..0000000000
--- a/src/ruby/pb/test/proto/test.rb
+++ /dev/null
@@ -1,14 +0,0 @@
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: test/proto/test.proto
-
-require 'google/protobuf'
-
-require 'test/proto/empty'
-require 'test/proto/messages'
-Google::Protobuf::DescriptorPool.generated_pool.build do
-end
-
-module Grpc
- module Testing
- end
-end
diff --git a/src/ruby/pb/test/proto/test_services.rb b/src/ruby/pb/test/proto/test_services.rb
deleted file mode 100644
index 9df9cc5860..0000000000
--- a/src/ruby/pb/test/proto/test_services.rb
+++ /dev/null
@@ -1,64 +0,0 @@
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# Source: test/proto/test.proto for package 'grpc.testing'
-
-require 'grpc'
-require 'test/proto/test'
-
-module Grpc
- module Testing
- module TestService
-
- # TODO: add proto service documentation here
- class Service
-
- include GRPC::GenericService
-
- self.marshal_class_method = :encode
- self.unmarshal_class_method = :decode
- self.service_name = 'grpc.testing.TestService'
-
- rpc :EmptyCall, Empty, Empty
- rpc :UnaryCall, SimpleRequest, SimpleResponse
- rpc :StreamingOutputCall, StreamingOutputCallRequest, stream(StreamingOutputCallResponse)
- rpc :StreamingInputCall, stream(StreamingInputCallRequest), StreamingInputCallResponse
- rpc :FullDuplexCall, stream(StreamingOutputCallRequest), stream(StreamingOutputCallResponse)
- rpc :HalfDuplexCall, stream(StreamingOutputCallRequest), stream(StreamingOutputCallResponse)
- end
-
- Stub = Service.rpc_stub_class
- end
- module UnimplementedService
-
- # TODO: add proto service documentation here
- class Service
-
- include GRPC::GenericService
-
- self.marshal_class_method = :encode
- self.unmarshal_class_method = :decode
- self.service_name = 'grpc.testing.UnimplementedService'
-
- rpc :UnimplementedCall, Empty, Empty
- end
-
- Stub = Service.rpc_stub_class
- end
- module ReconnectService
-
- # TODO: add proto service documentation here
- class Service
-
- include GRPC::GenericService
-
- self.marshal_class_method = :encode
- self.unmarshal_class_method = :decode
- self.service_name = 'grpc.testing.ReconnectService'
-
- rpc :Start, Empty, Empty
- rpc :Stop, Empty, ReconnectInfo
- end
-
- Stub = Service.rpc_stub_class
- end
- end
-end
diff --git a/src/ruby/pb/test/server.rb b/src/ruby/pb/test/server.rb
index 088f281dc4..11ee3d465d 100755
--- a/src/ruby/pb/test/server.rb
+++ b/src/ruby/pb/test/server.rb
@@ -50,9 +50,9 @@ require 'optparse'
require 'grpc'
-require 'test/proto/empty'
-require 'test/proto/messages'
-require 'test/proto/test_services'
+require_relative '../src/proto/grpc/testing/empty'
+require_relative '../src/proto/grpc/testing/messages'
+require_relative '../src/proto/grpc/testing/test_services'
# DebugIsTruncated extends the default Logger to truncate debug messages
class DebugIsTruncated < Logger
diff --git a/src/ruby/qps/src/proto/grpc/testing/messages.rb b/src/ruby/qps/src/proto/grpc/testing/messages.rb
index 2bdfe0eade..e27ccd0dc0 100644
--- a/src/ruby/qps/src/proto/grpc/testing/messages.rb
+++ b/src/ruby/qps/src/proto/grpc/testing/messages.rb
@@ -4,6 +4,9 @@
require 'google/protobuf'
Google::Protobuf::DescriptorPool.generated_pool.build do
+ add_message "grpc.testing.BoolValue" do
+ optional :value, :bool, 1
+ end
add_message "grpc.testing.Payload" do
optional :type, :enum, 1, "grpc.testing.PayloadType"
optional :body, :bytes, 2
@@ -18,8 +21,9 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
optional :payload, :message, 3, "grpc.testing.Payload"
optional :fill_username, :bool, 4
optional :fill_oauth_scope, :bool, 5
- optional :response_compression, :enum, 6, "grpc.testing.CompressionType"
+ optional :response_compressed, :message, 6, "grpc.testing.BoolValue"
optional :response_status, :message, 7, "grpc.testing.EchoStatus"
+ optional :expect_compressed, :message, 8, "grpc.testing.BoolValue"
end
add_message "grpc.testing.SimpleResponse" do
optional :payload, :message, 1, "grpc.testing.Payload"
@@ -28,6 +32,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
end
add_message "grpc.testing.StreamingInputCallRequest" do
optional :payload, :message, 1, "grpc.testing.Payload"
+ optional :expect_compressed, :message, 2, "grpc.testing.BoolValue"
end
add_message "grpc.testing.StreamingInputCallResponse" do
optional :aggregated_payload_size, :int32, 1
@@ -35,12 +40,12 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
add_message "grpc.testing.ResponseParameters" do
optional :size, :int32, 1
optional :interval_us, :int32, 2
+ optional :compressed, :message, 3, "grpc.testing.BoolValue"
end
add_message "grpc.testing.StreamingOutputCallRequest" do
optional :response_type, :enum, 1, "grpc.testing.PayloadType"
repeated :response_parameters, :message, 2, "grpc.testing.ResponseParameters"
optional :payload, :message, 3, "grpc.testing.Payload"
- optional :response_compression, :enum, 6, "grpc.testing.CompressionType"
optional :response_status, :message, 7, "grpc.testing.EchoStatus"
end
add_message "grpc.testing.StreamingOutputCallResponse" do
@@ -55,18 +60,12 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
end
add_enum "grpc.testing.PayloadType" do
value :COMPRESSABLE, 0
- value :UNCOMPRESSABLE, 1
- value :RANDOM, 2
- end
- add_enum "grpc.testing.CompressionType" do
- value :NONE, 0
- value :GZIP, 1
- value :DEFLATE, 2
end
end
module Grpc
module Testing
+ BoolValue = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.BoolValue").msgclass
Payload = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.Payload").msgclass
EchoStatus = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.EchoStatus").msgclass
SimpleRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.SimpleRequest").msgclass
@@ -79,6 +78,5 @@ module Grpc
ReconnectParams = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ReconnectParams").msgclass
ReconnectInfo = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ReconnectInfo").msgclass
PayloadType = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.PayloadType").enummodule
- CompressionType = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.CompressionType").enummodule
end
end
diff --git a/src/ruby/spec/compression_options_spec.rb b/src/ruby/spec/compression_options_spec.rb
new file mode 100644
index 0000000000..dbd7e59294
--- /dev/null
+++ b/src/ruby/spec/compression_options_spec.rb
@@ -0,0 +1,164 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+require 'grpc'
+
+describe GRPC::Core::CompressionOptions do
+ # Note these constants should be updated
+ # according to what the core lib provides.
+
+ # Names of supported compression algorithms
+ ALGORITHMS = [:identity, :deflate, :gzip]
+
+ # Names of valid supported compression levels
+ COMPRESS_LEVELS = [:none, :low, :medium, :high]
+
+ it 'implements to_s' do
+ expect { GRPC::Core::CompressionOptions.new.to_s }.to_not raise_error
+ end
+
+ it '#to_channel_arg_hash gives the same result as #to_hash' do
+ options = GRPC::Core::CompressionOptions.new
+ expect(options.to_channel_arg_hash).to eq(options.to_hash)
+ end
+
+ # Test the normal call sequence of creating an instance
+ # and then obtaining the resulting channel-arg hash that
+ # corresponds to the compression settings of the instance
+ describe 'creating, reading, and converting to channel args hash' do
+ it 'works when no optional args were provided' do
+ options = GRPC::Core::CompressionOptions.new
+
+ ALGORITHMS.each do |algorithm|
+ expect(options.algorithm_enabled?(algorithm)).to be true
+ end
+
+ expect(options.disabled_algorithms).to be_empty
+ expect(options.default_algorithm).to be nil
+ expect(options.default_level).to be nil
+ expect(options.to_hash).to be_instance_of(Hash)
+ end
+
+ it 'works when disabling multiple algorithms' do
+ options = GRPC::Core::CompressionOptions.new(
+ default_algorithm: :identity,
+ default_level: :none,
+ disabled_algorithms: [:gzip, :deflate]
+ )
+
+ [:gzip, :deflate].each do |algorithm|
+ expect(options.algorithm_enabled?(algorithm)).to be false
+ expect(options.disabled_algorithms.include?(algorithm)).to be true
+ end
+
+ expect(options.default_algorithm).to be(:identity)
+ expect(options.default_level).to be(:none)
+ expect(options.to_hash).to be_instance_of(Hash)
+ end
+
+ it 'works when all optional args have been set' do
+ options = GRPC::Core::CompressionOptions.new(
+ default_algorithm: :gzip,
+ default_level: :low,
+ disabled_algorithms: [:deflate]
+ )
+
+ expect(options.algorithm_enabled?(:deflate)).to be false
+ expect(options.algorithm_enabled?(:gzip)).to be true
+ expect(options.disabled_algorithms).to eq([:deflate])
+
+ expect(options.default_algorithm).to be(:gzip)
+ expect(options.default_level).to be(:low)
+ expect(options.to_hash).to be_instance_of(Hash)
+ end
+
+    it 'does not fail when no algorithms are disabled' do
+ options = GRPC::Core::CompressionOptions.new(
+ default_algorithm: :identity,
+ default_level: :high
+ )
+
+ ALGORITHMS.each do |algorithm|
+ expect(options.algorithm_enabled?(algorithm)).to be(true)
+ end
+
+ expect(options.disabled_algorithms).to be_empty
+ expect(options.default_algorithm).to be(:identity)
+ expect(options.default_level).to be(:high)
+ expect(options.to_hash).to be_instance_of(Hash)
+ end
+ end
+
+ describe '#new with bad parameters' do
+ it 'should fail with more than one parameter' do
+ blk = proc { GRPC::Core::CompressionOptions.new(:gzip, :none) }
+ expect { blk.call }.to raise_error
+ end
+
+ it 'should fail with a non-hash parameter' do
+ blk = proc { GRPC::Core::CompressionOptions.new(:gzip) }
+ expect { blk.call }.to raise_error
+ end
+ end
+
+ describe '#default_algorithm' do
+ it 'returns nil if unset' do
+ options = GRPC::Core::CompressionOptions.new
+ expect(options.default_algorithm).to be(nil)
+ end
+ end
+
+ describe '#default_level' do
+ it 'returns nil if unset' do
+ options = GRPC::Core::CompressionOptions.new
+ expect(options.default_level).to be(nil)
+ end
+ end
+
+ describe '#disabled_algorithms' do
+ it 'returns an empty list if no algorithms were disabled' do
+ options = GRPC::Core::CompressionOptions.new
+ expect(options.disabled_algorithms).to be_empty
+ end
+ end
+
+ describe '#algorithm_enabled?' do
+ [:none, :any, 'gzip', Object.new, 1].each do |name|
+ it "should fail for parameter ${name} of class #{name.class}" do
+ options = GRPC::Core::CompressionOptions.new(
+ disabled_algorithms: [:gzip])
+
+ blk = proc do
+ options.algorithm_enabled?(name)
+ end
+ expect { blk.call }.to raise_error
+ end
+ end
+ end
+end
diff --git a/src/ruby/tools/version.rb b/src/ruby/tools/version.rb
index 68c1bf369d..e457ec09dd 100644
--- a/src/ruby/tools/version.rb
+++ b/src/ruby/tools/version.rb
@@ -29,6 +29,6 @@
module GRPC
module Tools
- VERSION = '0.16.0.dev'
+ VERSION = '1.1.0.dev'
end
end
diff --git a/templates/composer.json.template b/templates/composer.json.template
index c9ffbbcbd0..07ab1f20eb 100644
--- a/templates/composer.json.template
+++ b/templates/composer.json.template
@@ -7,15 +7,9 @@
"keywords": ["rpc"],
"homepage": "http://grpc.io",
"license": "BSD-3-Clause",
- "repositories": [
- {
- "type": "vcs",
- "url": "https://github.com/stanley-cheung/Protobuf-PHP"
- }
- ],
"require": {
"php": ">=5.5.0",
- "datto/protobuf-php": "dev-master"
+ "stanley-cheung/protobuf-php": "dev-master"
},
"require-dev": {
"google/auth": "v0.9"
diff --git a/templates/package.xml.template b/templates/package.xml.template
index d8155cdd82..153823ece5 100644
--- a/templates/package.xml.template
+++ b/templates/package.xml.template
@@ -12,18 +12,19 @@
<email>grpc-packages@google.com</email>
<active>yes</active>
</lead>
- <date>2016-06-30</date>
+ <date>2016-07-13</date>
<time>16:06:07</time>
<version>
<release>${settings.php_version.php()}</release>
<api>${settings.php_version.php()}</api>
</version>
<stability>
- <release>beta</release>
- <api>beta</api>
+ <release>stable</release>
+ <api>stable</api>
</stability>
<license>BSD</license>
<notes>
+ - GA release
- Fix shutdown hang problem #4017
</notes>
<contents>
@@ -205,16 +206,17 @@
</release>
<release>
<version>
- <release>0.15.1</release>
- <api>0.15.1</api>
+ <release>1.0.0</release>
+ <api>1.0.0</api>
</version>
<stability>
- <release>beta</release>
- <api>beta</api>
+ <release>stable</release>
+ <api>stable</api>
</stability>
- <date>2016-06-30</date>
+ <date>2016-07-13</date>
<license>BSD</license>
<notes>
+ - GA release
- Fix shutdown hang problem #4017
</notes>
</release>
diff --git a/templates/tools/dockerfile/apt_get_pyenv.include b/templates/tools/dockerfile/apt_get_pyenv.include
new file mode 100644
index 0000000000..70e90289b7
--- /dev/null
+++ b/templates/tools/dockerfile/apt_get_pyenv.include
@@ -0,0 +1,18 @@
+# Install dependencies for pyenv
+RUN apt-get update && apt-get install -y ${'\\'}
+ libbz2-dev ${'\\'}
+ libncurses5-dev ${'\\'}
+ libncursesw5-dev ${'\\'}
+ libreadline-dev ${'\\'}
+ libsqlite3-dev ${'\\'}
+ libssl-dev ${'\\'}
+ llvm ${'\\'}
+ mercurial ${'\\'}
+ zlib1g-dev && apt-get clean
+
+# Install Pyenv and dev Python versions 3.5 and 3.6
+RUN curl -L https://raw.githubusercontent.com/yyuu/pyenv-installer/master/bin/pyenv-installer | bash
+RUN pyenv update
+RUN pyenv install 3.5-dev
+RUN pyenv install 3.6-dev
+RUN pyenv local 3.5-dev 3.6-dev
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile.template
index 4cb8d3b088..da0c70aee0 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../csharp_deps.include"/>
<%include file="../../run_tests_addons.include"/>
# Define the default command.
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile.template
index e39175a1ea..aba0a0497e 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../run_tests_addons.include"/>
# Define the default command.
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile.template
index 542c81d614..38a5ca725d 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile.template
@@ -32,6 +32,7 @@
FROM golang:1.5
<%include file="../../go_path.include"/>
+ <%include file="../../python_deps.include"/>
# Define the default command.
CMD ["bash"]
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile.template
index 542c81d614..38a5ca725d 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile.template
@@ -32,6 +32,7 @@
FROM golang:1.5
<%include file="../../go_path.include"/>
+ <%include file="../../python_deps.include"/>
# Define the default command.
CMD ["bash"]
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile.template
index c286e80826..3d5e7ee120 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile.template
@@ -32,7 +32,8 @@
FROM debian:jessie
<%include file="../../java_deps.include"/>
-
+ <%include file="../../python_deps.include"/>
+
# Trigger download of as many Gradle artifacts as possible.
RUN git clone --recursive --depth 1 https://github.com/grpc/grpc-java.git && ${'\\'}
cd grpc-java && ${'\\'}
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile.template
index 89bb9acc1a..01a03073a6 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../node_deps.include"/>
<%include file="../../run_tests_addons.include"/>
# Define the default command.
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile.template
index 476f9d3d3e..6232e081eb 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../ruby_deps.include"/>
<%include file="../../php_deps.include"/>
<%include file="../../run_tests_addons.include"/>
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile.template
index c3625b91fc..fbd8242391 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../ruby_deps.include"/>
<%include file="../../run_tests_addons.include"/>
# Define the default command.
diff --git a/templates/tools/dockerfile/python_deps.include b/templates/tools/dockerfile/python_deps.include
index 3162364048..26c91f495d 100644
--- a/templates/tools/dockerfile/python_deps.include
+++ b/templates/tools/dockerfile/python_deps.include
@@ -11,4 +11,4 @@ RUN apt-get update && apt-get install -y ${'\\'}
# Install Python packages from PyPI
RUN pip install pip --upgrade
RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template
index 074178252d..5d805bb4b2 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../ccache_setup.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template
index ac087c5da7..18f06b770c 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../ccache_setup.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template
index 3ed3d6556f..e02254cd53 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template
@@ -32,6 +32,7 @@
FROM golang:1.5
<%include file="../../gcp_api_libraries.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../go_path.include"/>
# Define the default command.
CMD ["bash"]
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template
index 17ed99fd2e..2bb2f9ba1e 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../ccache_setup.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template
index c50d38d1ec..d70b751b14 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../node_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../run_tests_addons.include"/>
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template
index 4cd069da34..f8dc079474 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../ruby_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../php_deps.include"/>
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template
index 8b933aaa32..18199771d7 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../ccache_setup.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
diff --git a/templates/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile.template b/templates/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile.template
index e9cab57019..24dad48807 100644
--- a/templates/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../csharp_deps.include"/>
# Install dotnet SDK based on https://www.microsoft.com/net/core#debian
diff --git a/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template
index 4cb8d3b088..da0c70aee0 100644
--- a/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../csharp_deps.include"/>
<%include file="../../run_tests_addons.include"/>
# Define the default command.
diff --git a/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template
index 04abf9f741..04767248b8 100644
--- a/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../clang_update.include"/>
<%include file="../../run_tests_addons.include"/>
diff --git a/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template b/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template
index 7f43e759fc..49fbea0f45 100644
--- a/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template
@@ -32,8 +32,8 @@
FROM 32bit/debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../run_tests_addons.include"/>
# Define the default command.
CMD ["bash"]
-
\ No newline at end of file
diff --git a/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template
index 4950a82d2d..8a95cad649 100644
--- a/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template
@@ -32,6 +32,7 @@
FROM ubuntu:14.04
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../run_tests_addons_nocache.include"/>
# Define the default command.
diff --git a/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template
index e39537975b..42ad6c130d 100644
--- a/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template
@@ -32,6 +32,7 @@
FROM ubuntu:16.04
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../run_tests_addons.include"/>
@@ -42,4 +43,3 @@
# Define the default command.
CMD ["bash"]
-
\ No newline at end of file
diff --git a/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template
index e77b3d9e41..b6a3b0d5d2 100644
--- a/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:wheezy
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../cxx_deps.include"/>
RUN apt-get update && apt-get install -y ${'\\'}
diff --git a/templates/tools/dockerfile/test/fuzzer/Dockerfile.template b/templates/tools/dockerfile/test/fuzzer/Dockerfile.template
index 33df275908..6d7cb72f27 100644
--- a/templates/tools/dockerfile/test/fuzzer/Dockerfile.template
+++ b/templates/tools/dockerfile/test/fuzzer/Dockerfile.template
@@ -32,6 +32,7 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../clang_update.include"/>
<%include file="../../run_tests_addons.include"/>
diff --git a/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template
index 5a6233343e..72b098f0c2 100644
--- a/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template
@@ -32,8 +32,8 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../node_deps.include"/>
<%include file="../../run_tests_addons.include"/>
# Define the default command.
CMD ["bash"]
-
\ No newline at end of file
diff --git a/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template
index fffac89efc..0cfa373c90 100644
--- a/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template
@@ -32,8 +32,8 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../php_deps.include"/>
<%include file="../../run_tests_addons.include"/>
# Define the default command.
CMD ["bash"]
-
\ No newline at end of file
diff --git a/templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template b/templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template
new file mode 100644
index 0000000000..f9a4dcb7b6
--- /dev/null
+++ b/templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template
@@ -0,0 +1,39 @@
+%YAML 1.2
+--- |
+ # Copyright 2016, Google Inc.
+ # All rights reserved.
+ #
+ # Redistribution and use in source and binary forms, with or without
+ # modification, are permitted provided that the following conditions are
+ # met:
+ #
+ # * Redistributions of source code must retain the above copyright
+ # notice, this list of conditions and the following disclaimer.
+ # * Redistributions in binary form must reproduce the above
+ # copyright notice, this list of conditions and the following disclaimer
+ # in the documentation and/or other materials provided with the
+ # distribution.
+ # * Neither the name of Google Inc. nor the names of its
+ # contributors may be used to endorse or promote products derived from
+ # this software without specific prior written permission.
+ #
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ FROM debian:jessie
+
+ <%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
+ <%include file="../../apt_get_pyenv.include"/>
+ <%include file="../../run_tests_addons.include"/>
+ # Define the default command.
+ CMD ["bash"]
diff --git a/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template
index 70baddffbf..35838bc11e 100644
--- a/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template
@@ -32,8 +32,8 @@
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
<%include file="../../ruby_deps.include"/>
<%include file="../../run_tests_addons.include"/>
# Define the default command.
CMD ["bash"]
-
\ No newline at end of file
diff --git a/templates/tools/dockerfile/test/sanity/Dockerfile.template b/templates/tools/dockerfile/test/sanity/Dockerfile.template
index 9987e35260..12309b64d1 100644
--- a/templates/tools/dockerfile/test/sanity/Dockerfile.template
+++ b/templates/tools/dockerfile/test/sanity/Dockerfile.template
@@ -32,6 +32,7 @@
FROM ubuntu:15.10
<%include file="../../apt_get_basic.include"/>
+ <%include file="../../python_deps.include"/>
#========================
# Sanity test dependencies
RUN apt-get update && apt-get install -y ${"\\"}
diff --git a/test/core/end2end/dualstack_socket_test.c b/test/core/end2end/dualstack_socket_test.c
index 65a8deb663..348b9ed5f0 100644
--- a/test/core/end2end/dualstack_socket_test.c
+++ b/test/core/end2end/dualstack_socket_test.c
@@ -273,7 +273,7 @@ void test_connect(const char *server_host, const char *client_host, int port,
}
int external_dns_works(const char *host) {
- grpc_resolved_addresses *res;
+ grpc_resolved_addresses *res = NULL;
grpc_error *error = grpc_blocking_resolve_address(host, "80", &res);
GRPC_ERROR_UNREF(error);
if (res != NULL) {
diff --git a/test/core/end2end/tests/high_initial_seqno.c b/test/core/end2end/tests/high_initial_seqno.c
index 50e3c9cb89..db45f5eb5a 100644
--- a/test/core/end2end/tests/high_initial_seqno.c
+++ b/test/core/end2end/tests/high_initial_seqno.c
@@ -203,6 +203,12 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
grpc_call_destroy(c);
grpc_call_destroy(s);
+ /* TODO(ctiller): this rate limits the test, and it should be removed when
+ retry has been implemented; until then cross-thread chatter
+ may result in some requests needing to be cancelled due to
+ seqno exhaustion. */
+ cq_verify_empty(cqv);
+
cq_verifier_destroy(cqv);
}
diff --git a/test/core/end2end/tests/network_status_change.c b/test/core/end2end/tests/network_status_change.c
index 10207844ab..39ddc13754 100644
--- a/test/core/end2end/tests/network_status_change.c
+++ b/test/core/end2end/tests/network_status_change.c
@@ -186,9 +186,10 @@ static void test_invoke_network_status_change(grpc_end2end_test_config config) {
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(102), 1);
+ cq_verify(cqv);
+
// Simulate the network loss event
grpc_network_status_shutdown_all_endpoints();
- cq_verify(cqv);
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
@@ -205,7 +206,7 @@ static void test_invoke_network_status_change(grpc_end2end_test_config config) {
op++;
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
- void shutdown_all_endpoints();
+
cq_expect_completion(cqv, tag(103), 1);
cq_expect_completion(cqv, tag(1), 1);
cq_verify(cqv);
diff --git a/test/core/internal_api_canaries/iomgr.c b/test/core/internal_api_canaries/iomgr.c
index 5e86c42309..27d630623e 100644
--- a/test/core/internal_api_canaries/iomgr.c
+++ b/test/core/internal_api_canaries/iomgr.c
@@ -77,11 +77,14 @@ static void test_code(void) {
/* endpoint.h */
grpc_endpoint endpoint;
- grpc_endpoint_vtable vtable = {
- grpc_endpoint_read, grpc_endpoint_write,
- grpc_endpoint_add_to_pollset, grpc_endpoint_add_to_pollset_set,
- grpc_endpoint_shutdown, grpc_endpoint_destroy,
- grpc_endpoint_get_peer};
+ grpc_endpoint_vtable vtable = {grpc_endpoint_read,
+ grpc_endpoint_write,
+ grpc_endpoint_get_workqueue,
+ grpc_endpoint_add_to_pollset,
+ grpc_endpoint_add_to_pollset_set,
+ grpc_endpoint_shutdown,
+ grpc_endpoint_destroy,
+ grpc_endpoint_get_peer};
endpoint.vtable = &vtable;
grpc_endpoint_read(&exec_ctx, &endpoint, NULL, NULL);
diff --git a/test/core/iomgr/workqueue_test.c b/test/core/iomgr/workqueue_test.c
deleted file mode 100644
index 76ecfae74b..0000000000
--- a/test/core/iomgr/workqueue_test.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/lib/iomgr/workqueue.h"
-
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-#include "test/core/util/test_config.h"
-
-static gpr_mu *g_mu;
-static grpc_pollset *g_pollset;
-
-static void must_succeed(grpc_exec_ctx *exec_ctx, void *p, grpc_error *error) {
- GPR_ASSERT(error == GRPC_ERROR_NONE);
- gpr_mu_lock(g_mu);
- *(int *)p = 1;
- GPR_ASSERT(
- GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, NULL)));
- gpr_mu_unlock(g_mu);
-}
-
-static void test_ref_unref(void) {
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_workqueue *wq;
- GPR_ASSERT(GRPC_LOG_IF_ERROR("grpc_workqueue_create",
- grpc_workqueue_create(&exec_ctx, &wq)));
- GRPC_WORKQUEUE_REF(wq, "test");
- GRPC_WORKQUEUE_UNREF(&exec_ctx, wq, "test");
- GRPC_WORKQUEUE_UNREF(&exec_ctx, wq, "destroy");
- grpc_exec_ctx_finish(&exec_ctx);
-}
-
-static void test_add_closure(void) {
- grpc_closure c;
- int done = 0;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_workqueue *wq;
- GPR_ASSERT(GRPC_LOG_IF_ERROR("grpc_workqueue_create",
- grpc_workqueue_create(&exec_ctx, &wq)));
- gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5);
- grpc_pollset_worker *worker = NULL;
- grpc_closure_init(&c, must_succeed, &done);
-
- grpc_workqueue_enqueue(&exec_ctx, wq, &c, GRPC_ERROR_NONE);
- grpc_workqueue_add_to_pollset(&exec_ctx, wq, g_pollset);
-
- gpr_mu_lock(g_mu);
- GPR_ASSERT(!done);
- while (!done) {
- GPR_ASSERT(GRPC_LOG_IF_ERROR(
- "pollset_work",
- grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(deadline.clock_type), deadline)));
- }
- gpr_mu_unlock(g_mu);
- grpc_exec_ctx_finish(&exec_ctx);
- GPR_ASSERT(done);
-
- GRPC_WORKQUEUE_UNREF(&exec_ctx, wq, "destroy");
- grpc_exec_ctx_finish(&exec_ctx);
-}
-
-static void test_flush(void) {
- grpc_closure c;
- int done = 0;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_workqueue *wq;
- GPR_ASSERT(GRPC_LOG_IF_ERROR("grpc_workqueue_create",
- grpc_workqueue_create(&exec_ctx, &wq)));
- gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5);
- grpc_pollset_worker *worker = NULL;
- grpc_closure_init(&c, must_succeed, &done);
-
- grpc_exec_ctx_sched(&exec_ctx, &c, GRPC_ERROR_NONE, NULL);
- grpc_workqueue_flush(&exec_ctx, wq);
- grpc_workqueue_add_to_pollset(&exec_ctx, wq, g_pollset);
-
- gpr_mu_lock(g_mu);
- GPR_ASSERT(!done);
- while (!done) {
- GPR_ASSERT(GRPC_LOG_IF_ERROR(
- "pollset_work",
- grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(deadline.clock_type), deadline)));
- }
- gpr_mu_unlock(g_mu);
- grpc_exec_ctx_finish(&exec_ctx);
- GPR_ASSERT(done);
-
- GRPC_WORKQUEUE_UNREF(&exec_ctx, wq, "destroy");
- grpc_exec_ctx_finish(&exec_ctx);
-}
-
-static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p,
- grpc_error *error) {
- grpc_pollset_destroy(p);
-}
-
-int main(int argc, char **argv) {
- grpc_closure destroyed;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_test_init(argc, argv);
- grpc_init();
- g_pollset = gpr_malloc(grpc_pollset_size());
- grpc_pollset_init(g_pollset, &g_mu);
-
- test_ref_unref();
- test_add_closure();
- test_flush();
-
- grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
- grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
- grpc_exec_ctx_finish(&exec_ctx);
- grpc_shutdown();
-
- gpr_free(g_pollset);
- return 0;
-}
diff --git a/test/core/surface/sequential_connectivity_test.c b/test/core/surface/sequential_connectivity_test.c
index 2fba3927ba..fe87f119f2 100644
--- a/test/core/surface/sequential_connectivity_test.c
+++ b/test/core/surface/sequential_connectivity_test.c
@@ -154,7 +154,7 @@ static void secure_test_add_port(grpc_server *server, const char *addr) {
static grpc_channel *secure_test_create_channel(const char *addr) {
grpc_channel_credentials *ssl_creds =
- grpc_ssl_credentials_create(NULL, NULL, NULL);
+ grpc_ssl_credentials_create(test_root_cert, NULL, NULL);
grpc_arg ssl_name_override = {GRPC_ARG_STRING,
GRPC_SSL_TARGET_NAME_OVERRIDE_ARG,
{"foo.test.google.fr"}};
diff --git a/test/core/transport/chttp2/status_conversion_test.c b/test/core/transport/chttp2/status_conversion_test.c
index e6fc785728..f5a5cd1395 100644
--- a/test/core/transport/chttp2/status_conversion_test.c
+++ b/test/core/transport/chttp2/status_conversion_test.c
@@ -37,8 +37,8 @@
#define GRPC_STATUS_TO_HTTP2_ERROR(a, b) \
GPR_ASSERT(grpc_chttp2_grpc_status_to_http2_error(a) == (b))
-#define HTTP2_ERROR_TO_GRPC_STATUS(a, b) \
- GPR_ASSERT(grpc_chttp2_http2_error_to_grpc_status(a) == (b))
+#define HTTP2_ERROR_TO_GRPC_STATUS(a, deadline, b) \
+ GPR_ASSERT(grpc_chttp2_http2_error_to_grpc_status(a, deadline) == (b))
#define GRPC_STATUS_TO_HTTP2_STATUS(a, b) \
GPR_ASSERT(grpc_chttp2_grpc_status_to_http2_status(a) == (b))
#define HTTP2_STATUS_TO_GRPC_STATUS(a, b) \
@@ -54,8 +54,7 @@ int main(int argc, char **argv) {
GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_UNKNOWN, GRPC_CHTTP2_INTERNAL_ERROR);
GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_INVALID_ARGUMENT,
GRPC_CHTTP2_INTERNAL_ERROR);
- GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_DEADLINE_EXCEEDED,
- GRPC_CHTTP2_INTERNAL_ERROR);
+ GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_DEADLINE_EXCEEDED, GRPC_CHTTP2_CANCEL);
GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_NOT_FOUND, GRPC_CHTTP2_INTERNAL_ERROR);
GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_ALREADY_EXISTS,
GRPC_CHTTP2_INTERNAL_ERROR);
@@ -95,25 +94,60 @@ int main(int argc, char **argv) {
GRPC_STATUS_TO_HTTP2_STATUS(GRPC_STATUS_UNAVAILABLE, 200);
GRPC_STATUS_TO_HTTP2_STATUS(GRPC_STATUS_DATA_LOSS, 200);
- HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_NO_ERROR, GRPC_STATUS_INTERNAL);
- HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_PROTOCOL_ERROR, GRPC_STATUS_INTERNAL);
- HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INTERNAL_ERROR, GRPC_STATUS_INTERNAL);
- HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FLOW_CONTROL_ERROR,
+ const gpr_timespec before_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_NO_ERROR, before_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_PROTOCOL_ERROR, before_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INTERNAL_ERROR, before_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FLOW_CONTROL_ERROR, before_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_SETTINGS_TIMEOUT, before_deadline,
GRPC_STATUS_INTERNAL);
- HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_SETTINGS_TIMEOUT,
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_STREAM_CLOSED, before_deadline,
GRPC_STATUS_INTERNAL);
- HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_STREAM_CLOSED, GRPC_STATUS_INTERNAL);
- HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FRAME_SIZE_ERROR,
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FRAME_SIZE_ERROR, before_deadline,
GRPC_STATUS_INTERNAL);
- HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_REFUSED_STREAM,
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_REFUSED_STREAM, before_deadline,
GRPC_STATUS_UNAVAILABLE);
- HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CANCEL, GRPC_STATUS_CANCELLED);
- HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_COMPRESSION_ERROR,
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CANCEL, before_deadline,
+ GRPC_STATUS_CANCELLED);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_COMPRESSION_ERROR, before_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CONNECT_ERROR, before_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_ENHANCE_YOUR_CALM, before_deadline,
+ GRPC_STATUS_RESOURCE_EXHAUSTED);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INADEQUATE_SECURITY, before_deadline,
+ GRPC_STATUS_PERMISSION_DENIED);
+
+ const gpr_timespec after_deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_NO_ERROR, after_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_PROTOCOL_ERROR, after_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INTERNAL_ERROR, after_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FLOW_CONTROL_ERROR, after_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_SETTINGS_TIMEOUT, after_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_STREAM_CLOSED, after_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FRAME_SIZE_ERROR, after_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_REFUSED_STREAM, after_deadline,
+ GRPC_STATUS_UNAVAILABLE);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CANCEL, after_deadline,
+ GRPC_STATUS_DEADLINE_EXCEEDED);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_COMPRESSION_ERROR, after_deadline,
+ GRPC_STATUS_INTERNAL);
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CONNECT_ERROR, after_deadline,
GRPC_STATUS_INTERNAL);
- HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CONNECT_ERROR, GRPC_STATUS_INTERNAL);
- HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_ENHANCE_YOUR_CALM,
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_ENHANCE_YOUR_CALM, after_deadline,
GRPC_STATUS_RESOURCE_EXHAUSTED);
- HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INADEQUATE_SECURITY,
+ HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INADEQUATE_SECURITY, after_deadline,
GRPC_STATUS_PERMISSION_DENIED);
HTTP2_STATUS_TO_GRPC_STATUS(200, GRPC_STATUS_OK);
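For context on the status_conversion hunks above: grpc_chttp2_http2_error_to_grpc_status now also takes the call deadline, so an HTTP/2 CANCEL observed after the deadline has already expired maps to DEADLINE_EXCEEDED rather than CANCELLED. The following is a minimal C++ sketch of that mapping; the function name and the inlined HTTP/2 error numbers are illustrative only, not the library's actual implementation.

/* Sketch only: fold the deadline into the HTTP/2-error -> gRPC-status
   mapping, the way the updated test expects. */
#include <grpc/status.h>
#include <grpc/support/time.h>

static grpc_status_code http2_error_to_status_sketch(int http2_error,
                                                      gpr_timespec deadline) {
  // A CANCEL that arrives after the deadline has passed is treated as
  // deadline-driven rather than as an explicit cancellation.
  if (http2_error == 0x8 /* CANCEL */ &&
      gpr_time_cmp(gpr_now(deadline.clock_type), deadline) >= 0) {
    return GRPC_STATUS_DEADLINE_EXCEEDED;
  }
  switch (http2_error) {
    case 0x7 /* REFUSED_STREAM */:
      return GRPC_STATUS_UNAVAILABLE;
    case 0x8 /* CANCEL */:
      return GRPC_STATUS_CANCELLED;
    case 0xb /* ENHANCE_YOUR_CALM */:
      return GRPC_STATUS_RESOURCE_EXHAUSTED;
    case 0xc /* INADEQUATE_SECURITY */:
      return GRPC_STATUS_PERMISSION_DENIED;
    default:  // NO_ERROR, PROTOCOL_ERROR, INTERNAL_ERROR, etc.
      return GRPC_STATUS_INTERNAL;
  }
}

With deadline = gpr_inf_future the CANCEL branch reports CANCELLED, and with deadline = gpr_inf_past it reports DEADLINE_EXCEEDED, matching the before_deadline / after_deadline cases in the test above.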
diff --git a/test/core/util/mock_endpoint.c b/test/core/util/mock_endpoint.c
index ed9545e9df..13e0e918fb 100644
--- a/test/core/util/mock_endpoint.c
+++ b/test/core/util/mock_endpoint.c
@@ -95,9 +95,17 @@ static char *me_get_peer(grpc_endpoint *ep) {
return gpr_strdup("fake:mock_endpoint");
}
+static grpc_workqueue *me_get_workqueue(grpc_endpoint *ep) { return NULL; }
+
static const grpc_endpoint_vtable vtable = {
- me_read, me_write, me_add_to_pollset, me_add_to_pollset_set,
- me_shutdown, me_destroy, me_get_peer,
+ me_read,
+ me_write,
+ me_get_workqueue,
+ me_add_to_pollset,
+ me_add_to_pollset_set,
+ me_shutdown,
+ me_destroy,
+ me_get_peer,
};
grpc_endpoint *grpc_mock_endpoint_create(void (*on_write)(gpr_slice slice)) {
diff --git a/test/core/util/passthru_endpoint.c b/test/core/util/passthru_endpoint.c
index a39f3dd66e..7ed9e97bd6 100644
--- a/test/core/util/passthru_endpoint.c
+++ b/test/core/util/passthru_endpoint.c
@@ -140,9 +140,17 @@ static char *me_get_peer(grpc_endpoint *ep) {
return gpr_strdup("fake:mock_endpoint");
}
+static grpc_workqueue *me_get_workqueue(grpc_endpoint *ep) { return NULL; }
+
static const grpc_endpoint_vtable vtable = {
- me_read, me_write, me_add_to_pollset, me_add_to_pollset_set,
- me_shutdown, me_destroy, me_get_peer,
+ me_read,
+ me_write,
+ me_get_workqueue,
+ me_add_to_pollset,
+ me_add_to_pollset_set,
+ me_shutdown,
+ me_destroy,
+ me_get_peer,
};
static void half_init(half *m, passthru_endpoint *parent) {
diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc
index 6c7eae53a4..4a8936d281 100644
--- a/test/cpp/end2end/async_end2end_test.cc
+++ b/test/cpp/end2end/async_end2end_test.cc
@@ -345,6 +345,31 @@ TEST_P(AsyncEnd2endTest, SequentialRpcs) {
SendRpc(10);
}
+// We do not need to protect notify because the use is synchronized.
+void ServerWait(Server* server, int* notify) {
+ server->Wait();
+ *notify = 1;
+}
+TEST_P(AsyncEnd2endTest, WaitAndShutdownTest) {
+ int notify = 0;
+ std::thread* wait_thread =
+ new std::thread(&ServerWait, server_.get(), &notify);
+ ResetStub();
+ SendRpc(1);
+ EXPECT_EQ(0, notify);
+ server_->Shutdown();
+ wait_thread->join();
+ EXPECT_EQ(1, notify);
+ delete wait_thread;
+}
+
+TEST_P(AsyncEnd2endTest, ShutdownThenWait) {
+ ResetStub();
+ SendRpc(1);
+ server_->Shutdown();
+ server_->Wait();
+}
+
// Test a simple RPC using the async version of Next
TEST_P(AsyncEnd2endTest, AsyncNextRpc) {
ResetStub();
diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc
index 354a59cedd..0f87ae3e44 100644
--- a/test/cpp/end2end/end2end_test.cc
+++ b/test/cpp/end2end/end2end_test.cc
@@ -1166,6 +1166,9 @@ TEST_P(ProxyEnd2endTest, HugeResponse) {
request.mutable_param()->set_response_message_length(kResponseSize);
ClientContext context;
+ std::chrono::system_clock::time_point deadline =
+ std::chrono::system_clock::now() + std::chrono::seconds(20);
+ context.set_deadline(deadline);
Status s = stub_->Echo(&context, request, &response);
EXPECT_EQ(kResponseSize, response.message().size());
EXPECT_TRUE(s.ok());
diff --git a/test/cpp/qps/client.h b/test/cpp/qps/client.h
index 047bd16408..4045e13460 100644
--- a/test/cpp/qps/client.h
+++ b/test/cpp/qps/client.h
@@ -112,6 +112,21 @@ class ClientRequestCreator<ByteBuffer> {
}
};
+class HistogramEntry GRPC_FINAL {
+ public:
+ HistogramEntry() : used_(false) {}
+ bool used() const { return used_; }
+ double value() const { return value_; }
+ void set_value(double v) {
+ used_ = true;
+ value_ = v;
+ }
+
+ private:
+ bool used_;
+ double value_;
+};
+
class Client {
public:
Client() : timer_(new UsageTimer), interarrival_timer_() {}
@@ -151,10 +166,21 @@ class Client {
return stats;
}
+ // Must call AwaitThreadsCompletion before destructor to avoid a race
+ // between destructor and invocation of virtual ThreadFunc
+ void AwaitThreadsCompletion() {
+ DestroyMultithreading();
+ std::unique_lock<std::mutex> g(thread_completion_mu_);
+ while (threads_remaining_ != 0) {
+ threads_complete_.wait(g);
+ }
+ }
+
protected:
bool closed_loop_;
void StartThreads(size_t num_threads) {
+ threads_remaining_ = num_threads;
for (size_t i = 0; i < num_threads; i++) {
threads_.emplace_back(new Thread(this, i));
}
@@ -162,7 +188,8 @@ class Client {
void EndThreads() { threads_.clear(); }
- virtual bool ThreadFunc(Histogram* histogram, size_t thread_idx) = 0;
+ virtual void DestroyMultithreading() = 0;
+ virtual bool ThreadFunc(HistogramEntry* histogram, size_t thread_idx) = 0;
void SetupLoadTest(const ClientConfig& config, size_t num_threads) {
// Set up the load distribution based on the number of threads
@@ -215,7 +242,6 @@ class Client {
public:
Thread(Client* client, size_t idx)
: done_(false),
- new_stats_(nullptr),
client_(client),
idx_(idx),
impl_(&Thread::ThreadFunc, this) {}
@@ -230,15 +256,10 @@ class Client {
void BeginSwap(Histogram* n) {
std::lock_guard<std::mutex> g(mu_);
- new_stats_ = n;
+ n->Swap(&histogram_);
}
- void EndSwap() {
- std::unique_lock<std::mutex> g(mu_);
- while (new_stats_ != nullptr) {
- cv_.wait(g);
- };
- }
+ void EndSwap() {}
void MergeStatsInto(Histogram* hist) {
std::unique_lock<std::mutex> g(mu_);
@@ -252,29 +273,26 @@ class Client {
void ThreadFunc() {
for (;;) {
// run the loop body
- const bool thread_still_ok = client_->ThreadFunc(&histogram_, idx_);
- // lock, see if we're done
+ HistogramEntry entry;
+ const bool thread_still_ok = client_->ThreadFunc(&entry, idx_);
+ // lock, update histogram if needed and see if we're done
std::lock_guard<std::mutex> g(mu_);
+ if (entry.used()) {
+ histogram_.Add(entry.value());
+ }
if (!thread_still_ok) {
gpr_log(GPR_ERROR, "Finishing client thread due to RPC error");
done_ = true;
}
if (done_) {
+ client_->CompleteThread();
return;
}
- // check if we're resetting stats, swap out the histogram if so
- if (new_stats_) {
- new_stats_->Swap(&histogram_);
- new_stats_ = nullptr;
- cv_.notify_one();
- }
}
}
std::mutex mu_;
- std::condition_variable cv_;
bool done_;
- Histogram* new_stats_;
Histogram histogram_;
Client* client_;
const size_t idx_;
@@ -286,6 +304,18 @@ class Client {
InterarrivalTimer interarrival_timer_;
std::vector<gpr_timespec> next_time_;
+
+ std::mutex thread_completion_mu_;
+ size_t threads_remaining_;
+ std::condition_variable threads_complete_;
+
+ void CompleteThread() {
+ std::lock_guard<std::mutex> g(thread_completion_mu_);
+ threads_remaining_--;
+ if (threads_remaining_ == 0) {
+ threads_complete_.notify_all();
+ }
+ }
};
template <class StubType, class RequestType>
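The client.h hunks above swap the per-thread Histogram pointer handoff for a per-iteration HistogramEntry and add a completion handshake (CompleteThread / AwaitThreadsCompletion) so the driver can stop worker threads before the client object is destroyed. The handshake itself is the usual counter-plus-condition-variable pattern; below is a self-contained C++ sketch under that reading, with all names illustrative rather than the benchmark's actual types.

// Sketch of the thread-completion handshake: each worker signals when it is
// done; the owner blocks until the count of outstanding workers hits zero.
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

class ThreadGroupSketch {
 public:
  explicit ThreadGroupSketch(size_t n) : remaining_(n) {
    for (size_t i = 0; i < n; ++i) {
      // A real worker would loop issuing RPCs before signalling completion.
      threads_.emplace_back([this] { CompleteThread(); });
    }
  }
  // Blocks until every worker has called CompleteThread(), then joins them.
  void AwaitThreadsCompletion() {
    {
      std::unique_lock<std::mutex> g(mu_);
      while (remaining_ != 0) {
        done_.wait(g);
      }
    }
    for (auto& t : threads_) t.join();
  }

 private:
  void CompleteThread() {
    std::lock_guard<std::mutex> g(mu_);
    if (--remaining_ == 0) done_.notify_all();
  }

  std::mutex mu_;
  std::condition_variable done_;
  size_t remaining_;
  std::vector<std::thread> threads_;
};

int main() {
  ThreadGroupSketch group(4);
  group.AwaitThreadsCompletion();  // returns once all four workers finished
  return 0;
}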
diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc
index 1507d1e3d6..5d9cb4bd0c 100644
--- a/test/cpp/qps/client_async.cc
+++ b/test/cpp/qps/client_async.cc
@@ -31,7 +31,6 @@
*
*/
-#include <cassert>
#include <forward_list>
#include <functional>
#include <list>
@@ -48,7 +47,6 @@
#include <grpc++/generic/generic_stub.h>
#include <grpc/grpc.h>
#include <grpc/support/cpu.h>
-#include <grpc/support/histogram.h>
#include <grpc/support/log.h>
#include "src/proto/grpc/testing/services.grpc.pb.h"
@@ -64,7 +62,7 @@ class ClientRpcContext {
ClientRpcContext() {}
virtual ~ClientRpcContext() {}
// next state, return false if done. Collect stats when appropriate
- virtual bool RunNextState(bool, Histogram* hist) = 0;
+ virtual bool RunNextState(bool, HistogramEntry* entry) = 0;
virtual ClientRpcContext* StartNewClone() = 0;
static void* tag(ClientRpcContext* c) { return reinterpret_cast<void*>(c); }
static ClientRpcContext* detag(void* t) {
@@ -104,7 +102,7 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
alarm_.reset(new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this)));
}
}
- bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE {
+ bool RunNextState(bool ok, HistogramEntry* entry) GRPC_OVERRIDE {
switch (next_state_) {
case State::READY:
start_ = UsageTimer::Now();
@@ -114,7 +112,7 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
next_state_ = State::RESP_DONE;
return true;
case State::RESP_DONE:
- hist->Add((UsageTimer::Now() - start_) * 1e9);
+ entry->set_value((UsageTimer::Now() - start_) * 1e9);
callback_(status_, &response_);
next_state_ = State::INVALID;
return false;
@@ -176,6 +174,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
for (int i = 0; i < num_async_threads_; i++) {
cli_cqs_.emplace_back(new CompletionQueue);
next_issuers_.emplace_back(NextIssuer(i));
+ shutdown_state_.emplace_back(new PerThreadShutdownState());
}
using namespace std::placeholders;
@@ -192,7 +191,6 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
}
virtual ~AsyncClient() {
for (auto cq = cli_cqs_.begin(); cq != cli_cqs_.end(); cq++) {
- (*cq)->Shutdown();
void* got_tag;
bool ok;
while ((*cq)->Next(&got_tag, &ok)) {
@@ -201,32 +199,16 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
}
}
- bool ThreadFunc(Histogram* histogram,
- size_t thread_idx) GRPC_OVERRIDE GRPC_FINAL {
- void* got_tag;
- bool ok;
-
- if (cli_cqs_[thread_idx]->Next(&got_tag, &ok)) {
- // Got a regular event, so process it
- ClientRpcContext* ctx = ClientRpcContext::detag(got_tag);
- if (!ctx->RunNextState(ok, histogram)) {
- // The RPC and callback are done, so clone the ctx
- // and kickstart the new one
- auto clone = ctx->StartNewClone();
- clone->Start(cli_cqs_[thread_idx].get());
- // delete the old version
- delete ctx;
- }
- return true;
- } else { // queue is shutting down
- return false;
- }
- }
-
protected:
const int num_async_threads_;
private:
+ struct PerThreadShutdownState {
+ mutable std::mutex mutex;
+ bool shutdown;
+ PerThreadShutdownState() : shutdown(false) {}
+ };
+
int NumThreads(const ClientConfig& config) {
int num_threads = config.async_client_threads();
if (num_threads <= 0) { // Use dynamic sizing
@@ -235,9 +217,60 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
}
return num_threads;
}
+ void DestroyMultithreading() GRPC_OVERRIDE GRPC_FINAL {
+ for (auto ss = shutdown_state_.begin(); ss != shutdown_state_.end(); ++ss) {
+ std::lock_guard<std::mutex> lock((*ss)->mutex);
+ (*ss)->shutdown = true;
+ }
+ for (auto cq = cli_cqs_.begin(); cq != cli_cqs_.end(); cq++) {
+ (*cq)->Shutdown();
+ }
+ this->EndThreads(); // this needed for resolution
+ }
+
+ bool ThreadFunc(HistogramEntry* entry,
+ size_t thread_idx) GRPC_OVERRIDE GRPC_FINAL {
+ void* got_tag;
+ bool ok;
+
+ switch (cli_cqs_[thread_idx]->AsyncNext(
+ &got_tag, &ok,
+ std::chrono::system_clock::now() + std::chrono::milliseconds(10))) {
+ case CompletionQueue::GOT_EVENT: {
+ // Got a regular event, so process it
+ ClientRpcContext* ctx = ClientRpcContext::detag(got_tag);
+ // Proceed while holding a lock to make sure that
+ // this thread isn't supposed to shut down
+ std::lock_guard<std::mutex> l(shutdown_state_[thread_idx]->mutex);
+ if (shutdown_state_[thread_idx]->shutdown) {
+ return true;
+ } else if (!ctx->RunNextState(ok, entry)) {
+ // The RPC and callback are done, so clone the ctx
+ // and kickstart the new one
+ auto clone = ctx->StartNewClone();
+ clone->Start(cli_cqs_[thread_idx].get());
+ // delete the old version
+ delete ctx;
+ }
+ return true;
+ }
+ case CompletionQueue::TIMEOUT: {
+ std::lock_guard<std::mutex> l(shutdown_state_[thread_idx]->mutex);
+ if (shutdown_state_[thread_idx]->shutdown) {
+ return true;
+ }
+ return true;
+ }
+ case CompletionQueue::SHUTDOWN: // queue is shutting down, so we must be
+ // done
+ return true;
+ }
+ GPR_UNREACHABLE_CODE(return true);
+ }
std::vector<std::unique_ptr<CompletionQueue>> cli_cqs_;
std::vector<std::function<gpr_timespec()>> next_issuers_;
+ std::vector<std::unique_ptr<PerThreadShutdownState>> shutdown_state_;
};
static std::unique_ptr<BenchmarkService::Stub> BenchmarkStubCreator(
@@ -253,7 +286,7 @@ class AsyncUnaryClient GRPC_FINAL
config, SetupCtx, BenchmarkStubCreator) {
StartThreads(num_async_threads_);
}
- ~AsyncUnaryClient() GRPC_OVERRIDE { EndThreads(); }
+ ~AsyncUnaryClient() GRPC_OVERRIDE {}
private:
static void CheckDone(grpc::Status s, SimpleResponse* response) {}
@@ -298,7 +331,7 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
stream_ = start_req_(stub_, &context_, cq, ClientRpcContext::tag(this));
next_state_ = State::STREAM_IDLE;
}
- bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE {
+ bool RunNextState(bool ok, HistogramEntry* entry) GRPC_OVERRIDE {
while (true) {
switch (next_state_) {
case State::STREAM_IDLE:
@@ -330,7 +363,7 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
return true;
break;
case State::READ_DONE:
- hist->Add((UsageTimer::Now() - start_) * 1e9);
+ entry->set_value((UsageTimer::Now() - start_) * 1e9);
callback_(status_, &response_);
next_state_ = State::STREAM_IDLE;
break; // loop around
@@ -382,7 +415,7 @@ class AsyncStreamingClient GRPC_FINAL
StartThreads(num_async_threads_);
}
- ~AsyncStreamingClient() GRPC_OVERRIDE { EndThreads(); }
+ ~AsyncStreamingClient() GRPC_OVERRIDE {}
private:
static void CheckDone(grpc::Status s, SimpleResponse* response) {}
@@ -430,7 +463,7 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
ClientRpcContext::tag(this));
next_state_ = State::STREAM_IDLE;
}
- bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE {
+ bool RunNextState(bool ok, HistogramEntry* entry) GRPC_OVERRIDE {
while (true) {
switch (next_state_) {
case State::STREAM_IDLE:
@@ -462,7 +495,7 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
return true;
break;
case State::READ_DONE:
- hist->Add((UsageTimer::Now() - start_) * 1e9);
+ entry->set_value((UsageTimer::Now() - start_) * 1e9);
callback_(status_, &response_);
next_state_ = State::STREAM_IDLE;
break; // loop around
@@ -518,7 +551,7 @@ class GenericAsyncStreamingClient GRPC_FINAL
StartThreads(num_async_threads_);
}
- ~GenericAsyncStreamingClient() GRPC_OVERRIDE { EndThreads(); }
+ ~GenericAsyncStreamingClient() GRPC_OVERRIDE {}
private:
static void CheckDone(grpc::Status s, ByteBuffer* response) {}
diff --git a/test/cpp/qps/client_sync.cc b/test/cpp/qps/client_sync.cc
index c88e95b80e..25c7823553 100644
--- a/test/cpp/qps/client_sync.cc
+++ b/test/cpp/qps/client_sync.cc
@@ -31,7 +31,6 @@
*
*/
-#include <cassert>
#include <chrono>
#include <memory>
#include <mutex>
@@ -46,7 +45,6 @@
#include <grpc++/server_builder.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/histogram.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
@@ -55,7 +53,6 @@
#include "src/core/lib/profiling/timers.h"
#include "src/proto/grpc/testing/services.grpc.pb.h"
#include "test/cpp/qps/client.h"
-#include "test/cpp/qps/histogram.h"
#include "test/cpp/qps/interarrival.h"
#include "test/cpp/qps/usage_timer.h"
@@ -90,6 +87,9 @@ class SynchronousClient
size_t num_threads_;
std::vector<SimpleResponse> responses_;
+
+ private:
+ void DestroyMultithreading() GRPC_OVERRIDE GRPC_FINAL { EndThreads(); }
};
class SynchronousUnaryClient GRPC_FINAL : public SynchronousClient {
@@ -98,9 +98,9 @@ class SynchronousUnaryClient GRPC_FINAL : public SynchronousClient {
: SynchronousClient(config) {
StartThreads(num_threads_);
}
- ~SynchronousUnaryClient() { EndThreads(); }
+ ~SynchronousUnaryClient() {}
- bool ThreadFunc(Histogram* histogram, size_t thread_idx) GRPC_OVERRIDE {
+ bool ThreadFunc(HistogramEntry* entry, size_t thread_idx) GRPC_OVERRIDE {
WaitToIssue(thread_idx);
auto* stub = channels_[thread_idx % channels_.size()].get_stub();
double start = UsageTimer::Now();
@@ -108,7 +108,7 @@ class SynchronousUnaryClient GRPC_FINAL : public SynchronousClient {
grpc::ClientContext context;
grpc::Status s =
stub->UnaryCall(&context, request_, &responses_[thread_idx]);
- histogram->Add((UsageTimer::Now() - start) * 1e9);
+ entry->set_value((UsageTimer::Now() - start) * 1e9);
return s.ok();
}
};
@@ -127,25 +127,29 @@ class SynchronousStreamingClient GRPC_FINAL : public SynchronousClient {
StartThreads(num_threads_);
}
~SynchronousStreamingClient() {
- EndThreads();
- for (auto stream = &stream_[0]; stream != &stream_[num_threads_];
- stream++) {
+ for (size_t i = 0; i < num_threads_; i++) {
+ auto stream = &stream_[i];
if (*stream) {
(*stream)->WritesDone();
- EXPECT_TRUE((*stream)->Finish().ok());
+ Status s = (*stream)->Finish();
+ EXPECT_TRUE(s.ok());
+ if (!s.ok()) {
+ gpr_log(GPR_ERROR, "Stream %zu received an error %s", i,
+ s.error_message().c_str());
+ }
}
}
delete[] stream_;
delete[] context_;
}
- bool ThreadFunc(Histogram* histogram, size_t thread_idx) GRPC_OVERRIDE {
+ bool ThreadFunc(HistogramEntry* entry, size_t thread_idx) GRPC_OVERRIDE {
WaitToIssue(thread_idx);
GPR_TIMER_SCOPE("SynchronousStreamingClient::ThreadFunc", 0);
double start = UsageTimer::Now();
if (stream_[thread_idx]->Write(request_) &&
stream_[thread_idx]->Read(&responses_[thread_idx])) {
- histogram->Add((UsageTimer::Now() - start) * 1e9);
+ entry->set_value((UsageTimer::Now() - start) * 1e9);
return true;
}
return false;
diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc
index 08bf045883..2aeaea51f2 100644
--- a/test/cpp/qps/driver.cc
+++ b/test/cpp/qps/driver.cc
@@ -87,7 +87,7 @@ static std::unordered_map<string, std::deque<int>> get_hosts_and_cores(
CoreRequest dummy;
CoreResponse cores;
grpc::Status s = stub->CoreCount(&ctx, dummy, &cores);
- assert(s.ok());
+ GPR_ASSERT(s.ok());
std::deque<int> dq;
for (int i = 0; i < cores.cores(); i++) {
dq.push_back(i);
@@ -289,9 +289,13 @@ std::unique_ptr<ScenarioResult> RunScenario(
*args.mutable_setup() = server_config;
servers[i].stream =
servers[i].stub->RunServer(runsc::AllocContext(&contexts));
- GPR_ASSERT(servers[i].stream->Write(args));
+ if (!servers[i].stream->Write(args)) {
+ gpr_log(GPR_ERROR, "Could not write args to server %zu", i);
+ }
ServerStatus init_status;
- GPR_ASSERT(servers[i].stream->Read(&init_status));
+ if (!servers[i].stream->Read(&init_status)) {
+ gpr_log(GPR_ERROR, "Server %zu did not yield initial status", i);
+ }
gpr_join_host_port(&cli_target, host, init_status.port());
client_config.add_server_targets(cli_target);
gpr_free(host);
@@ -345,9 +349,13 @@ std::unique_ptr<ScenarioResult> RunScenario(
*args.mutable_setup() = per_client_config;
clients[i].stream =
clients[i].stub->RunClient(runsc::AllocContext(&contexts));
- GPR_ASSERT(clients[i].stream->Write(args));
+ if (!clients[i].stream->Write(args)) {
+ gpr_log(GPR_ERROR, "Could not write args to client %zu", i);
+ }
ClientStatus init_status;
- GPR_ASSERT(clients[i].stream->Read(&init_status));
+ if (!clients[i].stream->Read(&init_status)) {
+ gpr_log(GPR_ERROR, "Client %zu did not yield initial status", i);
+ }
}
// Let everything warmup
@@ -362,19 +370,31 @@ std::unique_ptr<ScenarioResult> RunScenario(
server_mark.mutable_mark()->set_reset(true);
ClientArgs client_mark;
client_mark.mutable_mark()->set_reset(true);
- for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
- GPR_ASSERT(server->stream->Write(server_mark));
+ for (size_t i = 0; i < num_servers; i++) {
+ auto server = &servers[i];
+ if (!server->stream->Write(server_mark)) {
+ gpr_log(GPR_ERROR, "Couldn't write mark to server %zu", i);
+ }
}
- for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
- GPR_ASSERT(client->stream->Write(client_mark));
+ for (size_t i = 0; i < num_clients; i++) {
+ auto client = &clients[i];
+ if (!client->stream->Write(client_mark)) {
+ gpr_log(GPR_ERROR, "Couldn't write mark to client %zu", i);
+ }
}
ServerStatus server_status;
ClientStatus client_status;
- for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
- GPR_ASSERT(server->stream->Read(&server_status));
+ for (size_t i = 0; i < num_servers; i++) {
+ auto server = &servers[i];
+ if (!server->stream->Read(&server_status)) {
+ gpr_log(GPR_ERROR, "Couldn't get status from server %zu", i);
+ }
}
- for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
- GPR_ASSERT(client->stream->Read(&client_status));
+ for (size_t i = 0; i < num_clients; i++) {
+ auto client = &clients[i];
+ if (!client->stream->Read(&client_status)) {
+ gpr_log(GPR_ERROR, "Couldn't get status from client %zu", i);
+ }
}
// Wait some time
@@ -390,37 +410,73 @@ std::unique_ptr<ScenarioResult> RunScenario(
Histogram merged_latencies;
gpr_log(GPR_INFO, "Finishing clients");
- for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
- GPR_ASSERT(client->stream->Write(client_mark));
- GPR_ASSERT(client->stream->WritesDone());
+ for (size_t i = 0; i < num_clients; i++) {
+ auto client = &clients[i];
+ if (!client->stream->Write(client_mark)) {
+ gpr_log(GPR_ERROR, "Couldn't write mark to client %zu", i);
+ }
+ if (!client->stream->WritesDone()) {
+ gpr_log(GPR_ERROR, "Failed WritesDone for client %zu", i);
+ }
}
- for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
- GPR_ASSERT(client->stream->Read(&client_status));
- const auto& stats = client_status.stats();
- merged_latencies.MergeProto(stats.latencies());
- result->add_client_stats()->CopyFrom(stats);
- GPR_ASSERT(!client->stream->Read(&client_status));
+ for (size_t i = 0; i < num_clients; i++) {
+ auto client = &clients[i];
+ // Read the client final status
+ if (client->stream->Read(&client_status)) {
+ gpr_log(GPR_INFO, "Received final status from client %zu", i);
+ const auto& stats = client_status.stats();
+ merged_latencies.MergeProto(stats.latencies());
+ result->add_client_stats()->CopyFrom(stats);
+ // That final status should be the last message on the client stream
+ GPR_ASSERT(!client->stream->Read(&client_status));
+ } else {
+ gpr_log(GPR_ERROR, "Couldn't get final status from client %zu", i);
+ }
}
- for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
- GPR_ASSERT(client->stream->Finish().ok());
+ for (size_t i = 0; i < num_clients; i++) {
+ auto client = &clients[i];
+ Status s = client->stream->Finish();
+ result->add_client_success(s.ok());
+ if (!s.ok()) {
+ gpr_log(GPR_ERROR, "Client %zu had an error %s", i,
+ s.error_message().c_str());
+ }
}
delete[] clients;
merged_latencies.FillProto(result->mutable_latencies());
gpr_log(GPR_INFO, "Finishing servers");
- for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
- GPR_ASSERT(server->stream->Write(server_mark));
- GPR_ASSERT(server->stream->WritesDone());
+ for (size_t i = 0; i < num_servers; i++) {
+ auto server = &servers[i];
+ if (!server->stream->Write(server_mark)) {
+ gpr_log(GPR_ERROR, "Couldn't write mark to server %zu", i);
+ }
+ if (!server->stream->WritesDone()) {
+ gpr_log(GPR_ERROR, "Failed WritesDone for server %zu", i);
+ }
}
- for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
- GPR_ASSERT(server->stream->Read(&server_status));
- result->add_server_stats()->CopyFrom(server_status.stats());
- result->add_server_cores(server_status.cores());
- GPR_ASSERT(!server->stream->Read(&server_status));
+ for (size_t i = 0; i < num_servers; i++) {
+ auto server = &servers[i];
+ // Read the server final status
+ if (server->stream->Read(&server_status)) {
+ gpr_log(GPR_INFO, "Received final status from server %zu", i);
+ result->add_server_stats()->CopyFrom(server_status.stats());
+ result->add_server_cores(server_status.cores());
+ // That final status should be the last message on the server stream
+ GPR_ASSERT(!server->stream->Read(&server_status));
+ } else {
+ gpr_log(GPR_ERROR, "Couldn't get final status from server %zu", i);
+ }
}
- for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
- GPR_ASSERT(server->stream->Finish().ok());
+ for (size_t i = 0; i < num_servers; i++) {
+ auto server = &servers[i];
+ Status s = server->stream->Finish();
+ result->add_server_success(s.ok());
+ if (!s.ok()) {
+ gpr_log(GPR_ERROR, "Server %zu had an error %s", i,
+ s.error_message().c_str());
+ }
}
delete[] servers;
@@ -429,8 +485,9 @@ std::unique_ptr<ScenarioResult> RunScenario(
return result;
}
-void RunQuit() {
+bool RunQuit() {
// Get client, server lists
+ bool result = true;
auto workers = get_workers("QPS_WORKERS");
for (size_t i = 0; i < workers.size(); i++) {
auto stub = WorkerService::NewStub(
@@ -438,8 +495,14 @@ void RunQuit() {
Void dummy;
grpc::ClientContext ctx;
ctx.set_fail_fast(false);
- GPR_ASSERT(stub->QuitWorker(&ctx, dummy, &dummy).ok());
+ Status s = stub->QuitWorker(&ctx, dummy, &dummy);
+ if (!s.ok()) {
+ gpr_log(GPR_ERROR, "Worker %zu could not be properly quit because %s", i,
+ s.error_message().c_str());
+ result = false;
+ }
}
+ return result;
}
} // namespace testing
diff --git a/test/cpp/qps/driver.h b/test/cpp/qps/driver.h
index 3a5cf138f1..93f4370caf 100644
--- a/test/cpp/qps/driver.h
+++ b/test/cpp/qps/driver.h
@@ -47,7 +47,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
const grpc::testing::ServerConfig& server_config, size_t num_servers,
int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count);
-void RunQuit();
+bool RunQuit();
} // namespace testing
} // namespace grpc
diff --git a/test/cpp/qps/gen_build_yaml.py b/test/cpp/qps/gen_build_yaml.py
index 34b8151441..4ff4e44b8b 100755
--- a/test/cpp/qps/gen_build_yaml.py
+++ b/test/cpp/qps/gen_build_yaml.py
@@ -45,9 +45,10 @@ import performance.scenario_config as scenario_config
def _scenario_json_string(scenario_json):
# tweak parameters to get fast test times
- scenario_json['warmup_seconds'] = 1
+ scenario_json['warmup_seconds'] = 0
scenario_json['benchmark_seconds'] = 1
- return json.dumps(scenario_config.remove_nonproto_fields(scenario_json))
+ scenarios_json = {'scenarios': [scenario_config.remove_nonproto_fields(scenario_json)]}
+ return json.dumps(scenarios_json)
def threads_of_type(scenario_json, path):
d = scenario_json
@@ -72,8 +73,7 @@ print yaml.dump({
{
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s' % scenario_json['name'],
- 'args': ['--scenario_json',
- pipes.quote(_scenario_json_string(scenario_json))],
+ 'args': ['--scenarios_json', _scenario_json_string(scenario_json)],
'ci_platforms': ['linux', 'mac', 'posix', 'windows'],
'platforms': ['linux', 'mac', 'posix', 'windows'],
'flaky': False,
@@ -81,7 +81,8 @@ print yaml.dump({
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json),
- 'exclude_configs': []
+ 'exclude_configs': [],
+ 'timeout_seconds': 3*60
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
]
diff --git a/test/cpp/qps/json_run_localhost.cc b/test/cpp/qps/json_run_localhost.cc
index 6545dc2917..74e40fbf1a 100644
--- a/test/cpp/qps/json_run_localhost.cc
+++ b/test/cpp/qps/json_run_localhost.cc
@@ -75,7 +75,7 @@ int main(int argc, char** argv) {
for (int i = 1; i < argc; i++) {
args.push_back(argv[i]);
}
- SubProcess(args).Join();
+ GPR_ASSERT(SubProcess(args).Join() == 0);
for (auto it = jobs.begin(); it != jobs.end(); ++it) {
(*it)->Interrupt();
diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc
index f5d739f893..1524ebbc38 100644
--- a/test/cpp/qps/qps_json_driver.cc
+++ b/test/cpp/qps/qps_json_driver.cc
@@ -53,7 +53,7 @@ DEFINE_bool(quit, false, "Quit the workers");
namespace grpc {
namespace testing {
-static void QpsDriver() {
+static bool QpsDriver() {
grpc::string json;
bool scfile = (FLAGS_scenarios_file != "");
@@ -81,13 +81,13 @@ static void QpsDriver() {
} else if (scjson) {
json = FLAGS_scenarios_json.c_str();
} else if (FLAGS_quit) {
- RunQuit();
- return;
+ return RunQuit();
}
// Parse into an array of scenarios
Scenarios scenarios;
ParseJson(json.c_str(), "grpc.testing.Scenarios", &scenarios);
+ bool success = true;
// Make sure that there is at least some valid scenario here
GPR_ASSERT(scenarios.scenarios_size() > 0);
@@ -109,7 +109,15 @@ static void QpsDriver() {
GetReporter()->ReportQPSPerCore(*result);
GetReporter()->ReportLatency(*result);
GetReporter()->ReportTimes(*result);
+
+ for (int i = 0; success && i < result->client_success_size(); i++) {
+ success = result->client_success(i);
+ }
+ for (int i = 0; success && i < result->server_success_size(); i++) {
+ success = result->server_success(i);
+ }
}
+ return success;
}
} // namespace testing
@@ -118,7 +126,7 @@ static void QpsDriver() {
int main(int argc, char **argv) {
grpc::testing::InitBenchmark(&argc, &argv, true);
- grpc::testing::QpsDriver();
+ bool ok = grpc::testing::QpsDriver();
- return 0;
+ return ok ? 0 : 1;
}
diff --git a/test/cpp/qps/qps_worker.cc b/test/cpp/qps/qps_worker.cc
index f514e23e85..d3e53fe14a 100644
--- a/test/cpp/qps/qps_worker.cc
+++ b/test/cpp/qps/qps_worker.cc
@@ -33,7 +33,6 @@
#include "test/cpp/qps/qps_worker.h"
-#include <cassert>
#include <memory>
#include <mutex>
#include <sstream>
@@ -124,11 +123,12 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service {
GRPC_OVERRIDE {
InstanceGuard g(this);
if (!g.Acquired()) {
- return Status(StatusCode::RESOURCE_EXHAUSTED, "");
+ return Status(StatusCode::RESOURCE_EXHAUSTED, "Client worker busy");
}
ScopedProfile profile("qps_client.prof", false);
Status ret = RunClientBody(ctx, stream);
+ gpr_log(GPR_INFO, "RunClient: Returning");
return ret;
}
@@ -137,11 +137,12 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service {
GRPC_OVERRIDE {
InstanceGuard g(this);
if (!g.Acquired()) {
- return Status(StatusCode::RESOURCE_EXHAUSTED, "");
+ return Status(StatusCode::RESOURCE_EXHAUSTED, "Server worker busy");
}
ScopedProfile profile("qps_server.prof", false);
Status ret = RunServerBody(ctx, stream);
+ gpr_log(GPR_INFO, "RunServer: Returning");
return ret;
}
@@ -154,7 +155,7 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service {
Status QuitWorker(ServerContext* ctx, const Void*, Void*) GRPC_OVERRIDE {
InstanceGuard g(this);
if (!g.Acquired()) {
- return Status(StatusCode::RESOURCE_EXHAUSTED, "");
+ return Status(StatusCode::RESOURCE_EXHAUSTED, "Quitting worker busy");
}
worker_->MarkDone();
@@ -197,33 +198,38 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service {
ServerReaderWriter<ClientStatus, ClientArgs>* stream) {
ClientArgs args;
if (!stream->Read(&args)) {
- return Status(StatusCode::INVALID_ARGUMENT, "");
+ return Status(StatusCode::INVALID_ARGUMENT, "Couldn't read args");
}
if (!args.has_setup()) {
- return Status(StatusCode::INVALID_ARGUMENT, "");
+ return Status(StatusCode::INVALID_ARGUMENT, "Invalid setup arg");
}
gpr_log(GPR_INFO, "RunClientBody: about to create client");
auto client = CreateClient(args.setup());
if (!client) {
- return Status(StatusCode::INVALID_ARGUMENT, "");
+ return Status(StatusCode::INVALID_ARGUMENT, "Couldn't create client");
}
gpr_log(GPR_INFO, "RunClientBody: client created");
ClientStatus status;
if (!stream->Write(status)) {
- return Status(StatusCode::UNKNOWN, "");
+ return Status(StatusCode::UNKNOWN, "Client couldn't report init status");
}
gpr_log(GPR_INFO, "RunClientBody: creation status reported");
while (stream->Read(&args)) {
gpr_log(GPR_INFO, "RunClientBody: Message read");
if (!args.has_mark()) {
gpr_log(GPR_INFO, "RunClientBody: Message is not a mark!");
- return Status(StatusCode::INVALID_ARGUMENT, "");
+ return Status(StatusCode::INVALID_ARGUMENT, "Invalid mark");
}
*status.mutable_stats() = client->Mark(args.mark().reset());
- stream->Write(status);
+ if (!stream->Write(status)) {
+ return Status(StatusCode::UNKNOWN, "Client couldn't respond to mark");
+ }
gpr_log(GPR_INFO, "RunClientBody: Mark response given");
}
+ gpr_log(GPR_INFO, "RunClientBody: Awaiting Threads Completion");
+ client->AwaitThreadsCompletion();
+
gpr_log(GPR_INFO, "RunClientBody: Returning");
return Status::OK;
}
@@ -232,10 +238,10 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service {
ServerReaderWriter<ServerStatus, ServerArgs>* stream) {
ServerArgs args;
if (!stream->Read(&args)) {
- return Status(StatusCode::INVALID_ARGUMENT, "");
+ return Status(StatusCode::INVALID_ARGUMENT, "Couldn't read server args");
}
if (!args.has_setup()) {
- return Status(StatusCode::INVALID_ARGUMENT, "");
+ return Status(StatusCode::INVALID_ARGUMENT, "Bad server creation args");
}
if (server_port_ != 0) {
args.mutable_setup()->set_port(server_port_);
@@ -243,24 +249,26 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service {
gpr_log(GPR_INFO, "RunServerBody: about to create server");
auto server = CreateServer(args.setup());
if (!server) {
- return Status(StatusCode::INVALID_ARGUMENT, "");
+ return Status(StatusCode::INVALID_ARGUMENT, "Couldn't create server");
}
gpr_log(GPR_INFO, "RunServerBody: server created");
ServerStatus status;
status.set_port(server->port());
status.set_cores(server->cores());
if (!stream->Write(status)) {
- return Status(StatusCode::UNKNOWN, "");
+ return Status(StatusCode::UNKNOWN, "Server couldn't report init status");
}
gpr_log(GPR_INFO, "RunServerBody: creation status reported");
while (stream->Read(&args)) {
gpr_log(GPR_INFO, "RunServerBody: Message read");
if (!args.has_mark()) {
gpr_log(GPR_INFO, "RunServerBody: Message not a mark!");
- return Status(StatusCode::INVALID_ARGUMENT, "");
+ return Status(StatusCode::INVALID_ARGUMENT, "Invalid mark");
}
*status.mutable_stats() = server->Mark(args.mark().reset());
- stream->Write(status);
+ if (!stream->Write(status)) {
+ return Status(StatusCode::UNKNOWN, "Server couldn't respond to mark");
+ }
gpr_log(GPR_INFO, "RunServerBody: Mark response given");
}
diff --git a/test/cpp/qps/server_async.cc b/test/cpp/qps/server_async.cc
index c9954d0d02..dea8746331 100644
--- a/test/cpp/qps/server_async.cc
+++ b/test/cpp/qps/server_async.cc
@@ -102,7 +102,7 @@ class AsyncQpsServerTest : public Server {
auto process_rpc_bound =
std::bind(process_rpc, config.payload_config(), _1, _2);
- for (int i = 0; i < 10000 / num_threads; i++) {
+ for (int i = 0; i < 15000; i++) {
for (int j = 0; j < num_threads; j++) {
if (request_unary_function) {
auto request_unary =
@@ -123,21 +123,24 @@ class AsyncQpsServerTest : public Server {
for (int i = 0; i < num_threads; i++) {
shutdown_state_.emplace_back(new PerThreadShutdownState());
- }
- for (int i = 0; i < num_threads; i++) {
threads_.emplace_back(&AsyncQpsServerTest::ThreadFunc, this, i);
}
}
~AsyncQpsServerTest() {
for (auto ss = shutdown_state_.begin(); ss != shutdown_state_.end(); ++ss) {
- (*ss)->set_shutdown();
+ std::lock_guard<std::mutex> lock((*ss)->mutex);
+ (*ss)->shutdown = true;
+ }
+ // TODO (vpai): Remove this deadline and allow Shutdown to finish properly
+ auto deadline = std::chrono::system_clock::now() + std::chrono::seconds(3);
+ server_->Shutdown(deadline);
+ for (auto cq = srv_cqs_.begin(); cq != srv_cqs_.end(); ++cq) {
+ (*cq)->Shutdown();
}
- server_->Shutdown();
for (auto thr = threads_.begin(); thr != threads_.end(); thr++) {
thr->join();
}
for (auto cq = srv_cqs_.begin(); cq != srv_cqs_.end(); ++cq) {
- (*cq)->Shutdown();
bool ok;
void *got_tag;
while ((*cq)->Next(&got_tag, &ok))
@@ -150,22 +153,24 @@ class AsyncQpsServerTest : public Server {
}
private:
- void ThreadFunc(int rank) {
+ void ThreadFunc(int thread_idx) {
// Wait until work is available or we are shutting down
bool ok;
void *got_tag;
- while (srv_cqs_[rank]->Next(&got_tag, &ok)) {
+ while (srv_cqs_[thread_idx]->Next(&got_tag, &ok)) {
ServerRpcContext *ctx = detag(got_tag);
// The tag is a pointer to an RPC context to invoke
- const bool still_going = ctx->RunNextState(ok);
- if (!shutdown_state_[rank]->shutdown()) {
- // this RPC context is done, so refresh it
- if (!still_going) {
- ctx->Reset();
- }
- } else {
+ // Proceed while holding a lock to make sure that
+ // this thread isn't supposed to shut down
+ std::lock_guard<std::mutex> l(shutdown_state_[thread_idx]->mutex);
+ if (shutdown_state_[thread_idx]->shutdown) {
return;
}
+ const bool still_going = ctx->RunNextState(ok);
+ // if this RPC context is done, refresh it
+ if (!still_going) {
+ ctx->Reset();
+ }
}
return;
}
@@ -333,24 +338,12 @@ class AsyncQpsServerTest : public Server {
ServiceType async_service_;
std::forward_list<ServerRpcContext *> contexts_;
- class PerThreadShutdownState {
- public:
- PerThreadShutdownState() : shutdown_(false) {}
-
- bool shutdown() const {
- std::lock_guard<std::mutex> lock(mutex_);
- return shutdown_;
- }
-
- void set_shutdown() {
- std::lock_guard<std::mutex> lock(mutex_);
- shutdown_ = true;
- }
-
- private:
- mutable std::mutex mutex_;
- bool shutdown_;
+ struct PerThreadShutdownState {
+ mutable std::mutex mutex;
+ bool shutdown;
+ PerThreadShutdownState() : shutdown(false) {}
};
+
std::vector<std::unique_ptr<PerThreadShutdownState>> shutdown_state_;
};
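
The server_async.cc rework replaces the locked getter/setter class with a plain struct whose mutex and flag are checked inside the polling loop, so a worker thread never acts on a completion after it has been told to stop; the destructor sets every flag, shuts the server and completion queues down, then joins. A rough Python 3 transposition of that per-thread shutdown pattern (illustrative only, not gRPC API):

import threading
import queue

class PerThreadShutdownState:
    def __init__(self):
        self.mutex = threading.Lock()
        self.shutdown = False

def thread_func(work_queue, state):
    while True:
        try:
            work = work_queue.get(timeout=0.1)
        except queue.Empty:
            work = None
        # Check the flag under the lock before acting on anything,
        # mirroring the lock_guard at the top of the C++ loop body.
        with state.mutex:
            if state.shutdown:
                return
        if work is not None:
            work()

if __name__ == '__main__':
    states = [PerThreadShutdownState() for _ in range(4)]
    queues = [queue.Queue() for _ in range(4)]
    threads = [threading.Thread(target=thread_func, args=(q, s))
               for q, s in zip(queues, states)]
    for t in threads:
        t.start()
    queues[0].put(lambda: print('one unit of work'))
    for s in states:               # tell every thread to stop...
        with s.mutex:
            s.shutdown = True
    for t in threads:              # ...then join them
        t.join()
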
diff --git a/test/cpp/util/slice_test.cc b/test/cpp/util/slice_test.cc
index de7ff031ab..45799ae157 100644
--- a/test/cpp/util/slice_test.cc
+++ b/test/cpp/util/slice_test.cc
@@ -68,6 +68,16 @@ TEST_F(SliceTest, Empty) {
CheckSlice(empty_slice, "");
}
+TEST_F(SliceTest, Cslice) {
+ gpr_slice s = gpr_slice_from_copied_string(kContent);
+ Slice spp(s, Slice::STEAL_REF);
+ CheckSlice(spp, kContent);
+ gpr_slice c_slice = spp.c_slice();
+ EXPECT_EQ(GPR_SLICE_START_PTR(s), GPR_SLICE_START_PTR(c_slice));
+ EXPECT_EQ(GPR_SLICE_END_PTR(s), GPR_SLICE_END_PTR(c_slice));
+ gpr_slice_unref(c_slice);
+}
+
} // namespace
} // namespace grpc
diff --git a/tools/distrib/python/docgen.py b/tools/distrib/python/docgen.py
index 72c65ad14a..f5e89f1da6 100755
--- a/tools/distrib/python/docgen.py
+++ b/tools/distrib/python/docgen.py
@@ -70,7 +70,6 @@ environment.update({
})
subprocess_arguments_list = [
- {'args': ['make'], 'cwd': PROJECT_ROOT},
{'args': ['virtualenv', VIRTUALENV_DIR], 'env': environment},
{'args': [VIRTUALENV_PIP_PATH, 'install', '-r', REQUIREMENTS_PATH],
'env': environment},
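
For context, docgen.py drives the documentation build by walking a list of subprocess argument dicts; the hunk above simply drops the make step from that list. The driver pattern itself looks roughly like this (hypothetical commands, not the script's real steps):

import os
import subprocess

environment = dict(os.environ)

subprocess_arguments_list = [
    {'args': ['python', '--version'], 'env': environment},
    {'args': ['git', 'status'], 'cwd': '.'},
]

for arguments in subprocess_arguments_list:
    # Each dict is splatted straight into check_call, so any Popen keyword
    # (env, cwd, ...) can be attached per step.
    subprocess.check_call(**arguments)
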
diff --git a/tools/distrib/python/grpcio_tools/grpc_version.py b/tools/distrib/python/grpcio_tools/grpc_version.py
index 4b1e7fcd58..79c40717dd 100644
--- a/tools/distrib/python/grpcio_tools/grpc_version.py
+++ b/tools/distrib/python/grpcio_tools/grpc_version.py
@@ -29,4 +29,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!!
-VERSION='0.16.0.dev0'
+VERSION='1.1.0.dev0'
diff --git a/tools/distrib/python/grpcio_tools/setup.py b/tools/distrib/python/grpcio_tools/setup.py
index e025158a82..bb2c71d843 100644
--- a/tools/distrib/python/grpcio_tools/setup.py
+++ b/tools/distrib/python/grpcio_tools/setup.py
@@ -51,15 +51,43 @@ import grpc_version
PY3 = sys.version_info.major == 3
+# Environment variable to determine whether or not the Cython extension should
+# *use* Cython or use the generated C files. Note that this requires the C files
+# to have been generated by building first *with* Cython support.
+BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)
+
# There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are
# entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support.
# We use these environment variables to thus get around that without locking
# ourselves in w.r.t. the multitude of operating systems this ought to build on.
-# By default we assume a GCC-like compiler.
-EXTRA_COMPILE_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_CFLAGS',
- '-fno-wrapv -frtti -std=c++11'))
-EXTRA_LINK_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_LDFLAGS',
- '-lpthread'))
+# We can also use these variables as a way to inject environment-specific
+# compiler/linker flags. We assume GCC-like compilers and/or MinGW as a
+# reasonable default.
+EXTRA_ENV_COMPILE_ARGS = os.environ.get('GRPC_PYTHON_CFLAGS', None)
+EXTRA_ENV_LINK_ARGS = os.environ.get('GRPC_PYTHON_LDFLAGS', None)
+if EXTRA_ENV_COMPILE_ARGS is None:
+ EXTRA_ENV_COMPILE_ARGS = '-fno-wrapv -frtti -std=c++11'
+ if 'win32' in sys.platform:
+ # We use define flags here and don't directly add to DEFINE_MACROS below to
+ # ensure that the expert user/builder has a way of turning it off (via the
+ # envvars) without adding yet more GRPC-specific envvars.
+ # See https://sourceforge.net/p/mingw-w64/bugs/363/
+ if '32' in platform.architecture()[0]:
+ EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s'
+ else:
+ EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64'
+if EXTRA_ENV_LINK_ARGS is None:
+ EXTRA_ENV_LINK_ARGS = '-lpthread'
+ if 'win32' in sys.platform:
+ # TODO(atash) check if this is actually safe to just import and call on
+ # non-Windows (to avoid breaking import style)
+ from distutils.cygwinccompiler import get_msvcr
+ msvcr = get_msvcr()[0]
+ EXTRA_ENV_LINK_ARGS += (
+ ' -static-libgcc -static-libstdc++ -mcrtdll={msvcr} '
+ '-static'.format(msvcr=msvcr))
+EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS)
+EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS)
GRPC_PYTHON_TOOLS_PACKAGE = 'grpc.tools'
GRPC_PYTHON_PROTO_RESOURCES_NAME = '_proto'
@@ -97,15 +125,19 @@ def package_data():
proto_files.append(relative_target)
return {GRPC_PYTHON_TOOLS_PACKAGE: proto_files}
-def protoc_ext_module():
- plugin_sources = [
+def extension_modules():
+ if BUILD_WITH_CYTHON:
+ plugin_sources = ['grpc/tools/_protoc_compiler.pyx']
+ else:
+ plugin_sources = ['grpc/tools/_protoc_compiler.cpp']
+ plugin_sources += [
'grpc/tools/main.cc',
'grpc_root/src/compiler/python_generator.cc'] + [
os.path.join(protoc_lib_deps.CC_INCLUDE, cc_file)
for cc_file in protoc_lib_deps.CC_FILES]
plugin_ext = extension.Extension(
name='grpc.tools._protoc_compiler',
- sources=['grpc/tools/_protoc_compiler.pyx'] + plugin_sources,
+ sources=plugin_sources,
include_dirs=[
'.',
'grpc_root',
@@ -117,24 +149,23 @@ def protoc_ext_module():
extra_compile_args=list(EXTRA_COMPILE_ARGS),
extra_link_args=list(EXTRA_LINK_ARGS),
)
- return plugin_ext
-
-def maybe_cythonize(exts):
- from Cython import Build
- return Build.cythonize(exts)
+ extensions = [plugin_ext]
+ if BUILD_WITH_CYTHON:
+ from Cython import Build
+ return Build.cythonize(extensions)
+ else:
+ return extensions
setuptools.setup(
name='grpcio_tools',
version=grpc_version.VERSION,
license='3-clause BSD',
- ext_modules=maybe_cythonize([
- protoc_ext_module(),
- ]),
+ ext_modules=extension_modules(),
packages=setuptools.find_packages('.'),
namespace_packages=['grpc'],
install_requires=[
'protobuf>=3.0.0a3',
- 'grpcio>=0.14.0',
+ 'grpcio>=0.15.0',
],
package_data=package_data(),
)
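
The setup.py rework above keys the extension build off GRPC_PYTHON_BUILD_WITH_CYTHON: when the variable is set, the .pyx source is cythonized at build time; otherwise the pre-generated C++ translation is compiled directly, so end users do not need Cython installed. A stripped-down sketch of that toggle (package and file names are placeholders, not the real grpcio_tools layout):

import os

import setuptools

BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)

def extension_modules():
    # Choose the Cython source or the checked-in generated C++ file.
    if BUILD_WITH_CYTHON:
        sources = ['pkg/_compiler.pyx']
    else:
        sources = ['pkg/_compiler.cpp']
    ext = setuptools.Extension(name='pkg._compiler', sources=sources,
                               language='c++')
    if BUILD_WITH_CYTHON:
        # Imported lazily so plain source builds never require Cython.
        from Cython import Build
        return Build.cythonize([ext])
    return [ext]

setuptools.setup(
    name='example_pkg',
    version='0.0.0',
    ext_modules=extension_modules(),
)
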
diff --git a/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh b/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh
index eab7611b3f..462c65ab5e 100755
--- a/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh
+++ b/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh
@@ -31,7 +31,7 @@
set -e
# directories to run against
-DIRS="src/core/lib src/core/ext src/cpp test/core test/cpp include"
+DIRS="src/core/lib src/core/ext src/cpp test/core test/cpp include src/compiler"
# file matching patterns to check
GLOB="*.h *.c *.cc"
diff --git a/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile
index 150dde4f21..e3d52f0cb5 100644
--- a/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#================
# C# dependencies
diff --git a/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile
index bbd903e269..aa77d5f127 100644
--- a/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#=================
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
diff --git a/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile
index ec71a53c2d..05e963d1e6 100644
--- a/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile
@@ -32,5 +32,20 @@ FROM golang:1.5
# Using login shell removes Go from path, so we add it.
RUN ln -s /usr/local/go/bin/go /usr/local/bin
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
# Define the default command.
CMD ["bash"]
diff --git a/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile
index ec71a53c2d..05e963d1e6 100644
--- a/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile
@@ -32,5 +32,20 @@ FROM golang:1.5
# Using login shell removes Go from path, so we add it.
RUN ln -s /usr/local/go/bin/go /usr/local/bin
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
# Define the default command.
CMD ["bash"]
diff --git a/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile
index 252c9bc928..b5fe54f991 100644
--- a/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile
@@ -47,6 +47,21 @@ ENV JAVA_HOME /usr/lib/jvm/java-8-oracle
ENV PATH $PATH:$JAVA_HOME/bin
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
# Trigger download of as many Gradle artifacts as possible.
RUN git clone --recursive --depth 1 https://github.com/grpc/grpc-java.git && \
diff --git a/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile
index be07094cd2..d9a7501829 100644
--- a/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#==================
# Node dependencies
diff --git a/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile
index af83ee6164..65a8334269 100644
--- a/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#==================
# Ruby dependencies
diff --git a/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile
index 8e7319c200..10a88916ad 100644
--- a/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile
@@ -76,7 +76,7 @@ RUN apt-get update && apt-get install -y \
# Install Python packages from PyPI
RUN pip install pip --upgrade
RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
diff --git a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
index 88b513032a..dae64e5c8c 100644
--- a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#==================
# Ruby dependencies
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile
index 823fe948fb..81e3fdc380 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile
index 556a26ee13..e082da648b 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile
index 2a875f59f1..1e2b7d8c67 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile
@@ -34,6 +34,21 @@ RUN apt-get update && apt-get install -y python-pip && apt-get clean
RUN pip install --upgrade google-api-python-client
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
# Using login shell removes Go from path, so we add it.
RUN ln -s /usr/local/go/bin/go /usr/local/bin
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile
index 69bef1480c..0c17ff595e 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile
index 0738e95e9b..0594f69a5b 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#==================
# Node dependencies
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile
index 3092bd955e..bbbdd4a151 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#==================
# Ruby dependencies
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile
index ee6249d381..20d2d3f57b 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile
@@ -93,7 +93,7 @@ RUN apt-get update && apt-get install -y \
# Install Python packages from PyPI
RUN pip install pip --upgrade
RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
RUN pip install coverage
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile
index 36b54ddafe..f459153fe5 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
diff --git a/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile b/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile
index 98515aa5d7..25c6fe6ec6 100644
--- a/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile
+++ b/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#================
# C# dependencies
diff --git a/tools/dockerfile/test/csharp_jessie_x64/Dockerfile b/tools/dockerfile/test/csharp_jessie_x64/Dockerfile
index 150dde4f21..e3d52f0cb5 100644
--- a/tools/dockerfile/test/csharp_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/csharp_jessie_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#================
# C# dependencies
diff --git a/tools/dockerfile/test/cxx_jessie_x64/Dockerfile b/tools/dockerfile/test/cxx_jessie_x64/Dockerfile
index a8aa74dd0e..67cee19914 100644
--- a/tools/dockerfile/test/cxx_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/cxx_jessie_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#=================
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
diff --git a/tools/dockerfile/test/cxx_jessie_x86/Dockerfile b/tools/dockerfile/test/cxx_jessie_x86/Dockerfile
index abd3e42f26..bee0849c67 100644
--- a/tools/dockerfile/test/cxx_jessie_x86/Dockerfile
+++ b/tools/dockerfile/test/cxx_jessie_x86/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#=================
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
diff --git a/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile b/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile
index 5ef25e80b4..2b3f4af3e6 100644
--- a/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile
+++ b/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#=================
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
diff --git a/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile b/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile
index c65fc61977..2d282276d3 100644
--- a/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile
+++ b/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#=================
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
diff --git a/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile b/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile
index 9d5dd52c18..c25033387f 100644
--- a/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile
+++ b/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#=================
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
diff --git a/tools/dockerfile/test/fuzzer/Dockerfile b/tools/dockerfile/test/fuzzer/Dockerfile
index 3ac134ad7d..bd04f07cea 100644
--- a/tools/dockerfile/test/fuzzer/Dockerfile
+++ b/tools/dockerfile/test/fuzzer/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#=================
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
diff --git a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile
index bd7728580f..92c8436851 100644
--- a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile
@@ -137,7 +137,7 @@ RUN apt-get update && apt-get install -y \
# Install Python packages from PyPI
RUN pip install pip --upgrade
RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
diff --git a/tools/dockerfile/test/node_jessie_x64/Dockerfile b/tools/dockerfile/test/node_jessie_x64/Dockerfile
index be07094cd2..d9a7501829 100644
--- a/tools/dockerfile/test/node_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/node_jessie_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#==================
# Node dependencies
diff --git a/tools/dockerfile/test/php_jessie_x64/Dockerfile b/tools/dockerfile/test/php_jessie_x64/Dockerfile
index e477295722..2ef6e1d47f 100644
--- a/tools/dockerfile/test/php_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/php_jessie_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#=================
# PHP dependencies
diff --git a/tools/dockerfile/test/python_jessie_x64/Dockerfile b/tools/dockerfile/test/python_jessie_x64/Dockerfile
index 8e7319c200..10a88916ad 100644
--- a/tools/dockerfile/test/python_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/python_jessie_x64/Dockerfile
@@ -76,7 +76,7 @@ RUN apt-get update && apt-get install -y \
# Install Python packages from PyPI
RUN pip install pip --upgrade
RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
diff --git a/tools/dockerfile/test/python_pyenv_x64/Dockerfile b/tools/dockerfile/test/python_pyenv_x64/Dockerfile
new file mode 100644
index 0000000000..abb5f3c89b
--- /dev/null
+++ b/tools/dockerfile/test/python_pyenv_x64/Dockerfile
@@ -0,0 +1,112 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+FROM debian:jessie
+
+# Install Git and basic packages.
+RUN apt-get update && apt-get install -y \
+ autoconf \
+ autotools-dev \
+ build-essential \
+ bzip2 \
+ ccache \
+ curl \
+ gcc \
+ gcc-multilib \
+ git \
+ golang \
+ gyp \
+ lcov \
+ libc6 \
+ libc6-dbg \
+ libc6-dev \
+ libgtest-dev \
+ libtool \
+ make \
+ perl \
+ strace \
+ python-dev \
+ python-setuptools \
+ python-yaml \
+ telnet \
+ unzip \
+ wget \
+ zip && apt-get clean
+
+#================
+# Build profiling
+RUN apt-get update && apt-get install -y time && apt-get clean
+
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
+# Install dependencies for pyenv
+RUN apt-get update && apt-get install -y \
+ libbz2-dev \
+ libncurses5-dev \
+ libncursesw5-dev \
+ libreadline-dev \
+ libsqlite3-dev \
+ libssl-dev \
+ llvm \
+ mercurial \
+ zlib1g-dev && apt-get clean
+
+# Install Pyenv and dev Python versions 3.5 and 3.6
+RUN curl -L https://raw.githubusercontent.com/yyuu/pyenv-installer/master/bin/pyenv-installer | bash
+RUN pyenv update
+RUN pyenv install 3.5-dev
+RUN pyenv install 3.6-dev
+RUN pyenv local 3.5-dev 3.6-dev
+
+# Prepare ccache
+RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
+RUN ln -s /usr/bin/ccache /usr/local/bin/g++
+RUN ln -s /usr/bin/ccache /usr/local/bin/cc
+RUN ln -s /usr/bin/ccache /usr/local/bin/c++
+RUN ln -s /usr/bin/ccache /usr/local/bin/clang
+RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
+
+
+RUN mkdir /var/local/jenkins
+
+# Define the default command.
+CMD ["bash"]
diff --git a/tools/dockerfile/test/ruby_jessie_x64/Dockerfile b/tools/dockerfile/test/ruby_jessie_x64/Dockerfile
index 88b513032a..dae64e5c8c 100644
--- a/tools/dockerfile/test/ruby_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/ruby_jessie_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#==================
# Ruby dependencies
diff --git a/tools/dockerfile/test/sanity/Dockerfile b/tools/dockerfile/test/sanity/Dockerfile
index 70a32c5586..f4b4831a64 100644
--- a/tools/dockerfile/test/sanity/Dockerfile
+++ b/tools/dockerfile/test/sanity/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+ python-all-dev \
+ python3-all-dev \
+ python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
#========================
# Sanity test dependencies
RUN apt-get update && apt-get install -y \
diff --git a/tools/doxygen/Doxyfile.c++ b/tools/doxygen/Doxyfile.c++
index db6b36f8c7..a2415e1217 100644
--- a/tools/doxygen/Doxyfile.c++
+++ b/tools/doxygen/Doxyfile.c++
@@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC C++"
# could be handy for archiving the generated documentation or if some version
# control system is used.
-PROJECT_NUMBER = 0.16.0-dev
+PROJECT_NUMBER = 1.1.0-dev
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
@@ -857,15 +857,7 @@ include/grpc/impl/codegen/sync.h \
include/grpc/impl/codegen/sync_generic.h \
include/grpc/impl/codegen/sync_posix.h \
include/grpc/impl/codegen/sync_windows.h \
-include/grpc/impl/codegen/time.h \
-include/grpc/byte_buffer.h \
-include/grpc/byte_buffer_reader.h \
-include/grpc/compression.h \
-include/grpc/grpc.h \
-include/grpc/grpc_posix.h \
-include/grpc/status.h \
-include/grpc/grpc_security.h \
-include/grpc/grpc_security_constants.h
+include/grpc/impl/codegen/time.h
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal
index 660e501d71..945298b964 100644
--- a/tools/doxygen/Doxyfile.c++.internal
+++ b/tools/doxygen/Doxyfile.c++.internal
@@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC C++"
# could be handy for archiving the generated documentation or if some version
# control system is used.
-PROJECT_NUMBER = 0.16.0-dev
+PROJECT_NUMBER = 1.1.0-dev
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
@@ -858,14 +858,6 @@ include/grpc/impl/codegen/sync_generic.h \
include/grpc/impl/codegen/sync_posix.h \
include/grpc/impl/codegen/sync_windows.h \
include/grpc/impl/codegen/time.h \
-include/grpc/byte_buffer.h \
-include/grpc/byte_buffer_reader.h \
-include/grpc/compression.h \
-include/grpc/grpc.h \
-include/grpc/grpc_posix.h \
-include/grpc/status.h \
-include/grpc/grpc_security.h \
-include/grpc/grpc_security_constants.h \
include/grpc++/impl/codegen/core_codegen.h \
src/cpp/client/secure_credentials.h \
src/cpp/common/secure_auth_context.h \
@@ -873,109 +865,6 @@ src/cpp/server/secure_server_credentials.h \
src/cpp/client/create_channel_internal.h \
src/cpp/server/dynamic_thread_pool.h \
src/cpp/server/thread_pool_interface.h \
-src/core/lib/channel/channel_args.h \
-src/core/lib/channel/channel_stack.h \
-src/core/lib/channel/channel_stack_builder.h \
-src/core/lib/channel/compress_filter.h \
-src/core/lib/channel/connected_channel.h \
-src/core/lib/channel/context.h \
-src/core/lib/channel/http_client_filter.h \
-src/core/lib/channel/http_server_filter.h \
-src/core/lib/compression/algorithm_metadata.h \
-src/core/lib/compression/message_compress.h \
-src/core/lib/debug/trace.h \
-src/core/lib/http/format_request.h \
-src/core/lib/http/httpcli.h \
-src/core/lib/http/parser.h \
-src/core/lib/iomgr/closure.h \
-src/core/lib/iomgr/endpoint.h \
-src/core/lib/iomgr/endpoint_pair.h \
-src/core/lib/iomgr/error.h \
-src/core/lib/iomgr/ev_epoll_linux.h \
-src/core/lib/iomgr/ev_poll_and_epoll_posix.h \
-src/core/lib/iomgr/ev_poll_posix.h \
-src/core/lib/iomgr/ev_posix.h \
-src/core/lib/iomgr/exec_ctx.h \
-src/core/lib/iomgr/executor.h \
-src/core/lib/iomgr/iocp_windows.h \
-src/core/lib/iomgr/iomgr.h \
-src/core/lib/iomgr/iomgr_internal.h \
-src/core/lib/iomgr/iomgr_posix.h \
-src/core/lib/iomgr/load_file.h \
-src/core/lib/iomgr/network_status_tracker.h \
-src/core/lib/iomgr/polling_entity.h \
-src/core/lib/iomgr/pollset.h \
-src/core/lib/iomgr/pollset_set.h \
-src/core/lib/iomgr/pollset_set_windows.h \
-src/core/lib/iomgr/pollset_windows.h \
-src/core/lib/iomgr/resolve_address.h \
-src/core/lib/iomgr/sockaddr.h \
-src/core/lib/iomgr/sockaddr_posix.h \
-src/core/lib/iomgr/sockaddr_utils.h \
-src/core/lib/iomgr/sockaddr_windows.h \
-src/core/lib/iomgr/socket_utils_posix.h \
-src/core/lib/iomgr/socket_windows.h \
-src/core/lib/iomgr/tcp_client.h \
-src/core/lib/iomgr/tcp_posix.h \
-src/core/lib/iomgr/tcp_server.h \
-src/core/lib/iomgr/tcp_windows.h \
-src/core/lib/iomgr/time_averaged_stats.h \
-src/core/lib/iomgr/timer.h \
-src/core/lib/iomgr/timer_heap.h \
-src/core/lib/iomgr/udp_server.h \
-src/core/lib/iomgr/unix_sockets_posix.h \
-src/core/lib/iomgr/wakeup_fd_pipe.h \
-src/core/lib/iomgr/wakeup_fd_posix.h \
-src/core/lib/iomgr/workqueue.h \
-src/core/lib/iomgr/workqueue_posix.h \
-src/core/lib/iomgr/workqueue_windows.h \
-src/core/lib/json/json.h \
-src/core/lib/json/json_common.h \
-src/core/lib/json/json_reader.h \
-src/core/lib/json/json_writer.h \
-src/core/lib/surface/api_trace.h \
-src/core/lib/surface/call.h \
-src/core/lib/surface/call_test_only.h \
-src/core/lib/surface/channel.h \
-src/core/lib/surface/channel_init.h \
-src/core/lib/surface/channel_stack_type.h \
-src/core/lib/surface/completion_queue.h \
-src/core/lib/surface/event_string.h \
-src/core/lib/surface/init.h \
-src/core/lib/surface/lame_client.h \
-src/core/lib/surface/server.h \
-src/core/lib/transport/byte_stream.h \
-src/core/lib/transport/connectivity_state.h \
-src/core/lib/transport/metadata.h \
-src/core/lib/transport/metadata_batch.h \
-src/core/lib/transport/static_metadata.h \
-src/core/lib/transport/transport.h \
-src/core/lib/transport/transport_impl.h \
-src/core/lib/security/context/security_context.h \
-src/core/lib/security/credentials/composite/composite_credentials.h \
-src/core/lib/security/credentials/credentials.h \
-src/core/lib/security/credentials/fake/fake_credentials.h \
-src/core/lib/security/credentials/google_default/google_default_credentials.h \
-src/core/lib/security/credentials/iam/iam_credentials.h \
-src/core/lib/security/credentials/jwt/json_token.h \
-src/core/lib/security/credentials/jwt/jwt_credentials.h \
-src/core/lib/security/credentials/jwt/jwt_verifier.h \
-src/core/lib/security/credentials/oauth2/oauth2_credentials.h \
-src/core/lib/security/credentials/plugin/plugin_credentials.h \
-src/core/lib/security/credentials/ssl/ssl_credentials.h \
-src/core/lib/security/transport/auth_filters.h \
-src/core/lib/security/transport/handshake.h \
-src/core/lib/security/transport/secure_endpoint.h \
-src/core/lib/security/transport/security_connector.h \
-src/core/lib/security/transport/tsi_error.h \
-src/core/lib/security/util/b64.h \
-src/core/lib/security/util/json_util.h \
-src/core/ext/transport/chttp2/alpn/alpn.h \
-src/core/lib/tsi/fake_transport_security.h \
-src/core/lib/tsi/ssl_transport_security.h \
-src/core/lib/tsi/ssl_types.h \
-src/core/lib/tsi/transport_security.h \
-src/core/lib/tsi/transport_security_interface.h \
src/cpp/client/secure_credentials.cc \
src/cpp/common/auth_property_iterator.cc \
src/cpp/common/secure_auth_context.cc \
@@ -1008,122 +897,6 @@ src/cpp/util/slice.cc \
src/cpp/util/status.cc \
src/cpp/util/string_ref.cc \
src/cpp/util/time.cc \
-src/core/lib/channel/channel_args.c \
-src/core/lib/channel/channel_stack.c \
-src/core/lib/channel/channel_stack_builder.c \
-src/core/lib/channel/compress_filter.c \
-src/core/lib/channel/connected_channel.c \
-src/core/lib/channel/http_client_filter.c \
-src/core/lib/channel/http_server_filter.c \
-src/core/lib/compression/compression.c \
-src/core/lib/compression/message_compress.c \
-src/core/lib/debug/trace.c \
-src/core/lib/http/format_request.c \
-src/core/lib/http/httpcli.c \
-src/core/lib/http/parser.c \
-src/core/lib/iomgr/closure.c \
-src/core/lib/iomgr/endpoint.c \
-src/core/lib/iomgr/endpoint_pair_posix.c \
-src/core/lib/iomgr/endpoint_pair_windows.c \
-src/core/lib/iomgr/error.c \
-src/core/lib/iomgr/ev_epoll_linux.c \
-src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
-src/core/lib/iomgr/ev_poll_posix.c \
-src/core/lib/iomgr/ev_posix.c \
-src/core/lib/iomgr/exec_ctx.c \
-src/core/lib/iomgr/executor.c \
-src/core/lib/iomgr/iocp_windows.c \
-src/core/lib/iomgr/iomgr.c \
-src/core/lib/iomgr/iomgr_posix.c \
-src/core/lib/iomgr/iomgr_windows.c \
-src/core/lib/iomgr/load_file.c \
-src/core/lib/iomgr/network_status_tracker.c \
-src/core/lib/iomgr/polling_entity.c \
-src/core/lib/iomgr/pollset_set_windows.c \
-src/core/lib/iomgr/pollset_windows.c \
-src/core/lib/iomgr/resolve_address_posix.c \
-src/core/lib/iomgr/resolve_address_windows.c \
-src/core/lib/iomgr/sockaddr_utils.c \
-src/core/lib/iomgr/socket_utils_common_posix.c \
-src/core/lib/iomgr/socket_utils_linux.c \
-src/core/lib/iomgr/socket_utils_posix.c \
-src/core/lib/iomgr/socket_windows.c \
-src/core/lib/iomgr/tcp_client_posix.c \
-src/core/lib/iomgr/tcp_client_windows.c \
-src/core/lib/iomgr/tcp_posix.c \
-src/core/lib/iomgr/tcp_server_posix.c \
-src/core/lib/iomgr/tcp_server_windows.c \
-src/core/lib/iomgr/tcp_windows.c \
-src/core/lib/iomgr/time_averaged_stats.c \
-src/core/lib/iomgr/timer.c \
-src/core/lib/iomgr/timer_heap.c \
-src/core/lib/iomgr/udp_server.c \
-src/core/lib/iomgr/unix_sockets_posix.c \
-src/core/lib/iomgr/unix_sockets_posix_noop.c \
-src/core/lib/iomgr/wakeup_fd_eventfd.c \
-src/core/lib/iomgr/wakeup_fd_nospecial.c \
-src/core/lib/iomgr/wakeup_fd_pipe.c \
-src/core/lib/iomgr/wakeup_fd_posix.c \
-src/core/lib/iomgr/workqueue_posix.c \
-src/core/lib/iomgr/workqueue_windows.c \
-src/core/lib/json/json.c \
-src/core/lib/json/json_reader.c \
-src/core/lib/json/json_string.c \
-src/core/lib/json/json_writer.c \
-src/core/lib/surface/alarm.c \
-src/core/lib/surface/api_trace.c \
-src/core/lib/surface/byte_buffer.c \
-src/core/lib/surface/byte_buffer_reader.c \
-src/core/lib/surface/call.c \
-src/core/lib/surface/call_details.c \
-src/core/lib/surface/call_log_batch.c \
-src/core/lib/surface/channel.c \
-src/core/lib/surface/channel_init.c \
-src/core/lib/surface/channel_ping.c \
-src/core/lib/surface/channel_stack_type.c \
-src/core/lib/surface/completion_queue.c \
-src/core/lib/surface/event_string.c \
-src/core/lib/surface/lame_client.c \
-src/core/lib/surface/metadata_array.c \
-src/core/lib/surface/server.c \
-src/core/lib/surface/validate_metadata.c \
-src/core/lib/surface/version.c \
-src/core/lib/transport/byte_stream.c \
-src/core/lib/transport/connectivity_state.c \
-src/core/lib/transport/metadata.c \
-src/core/lib/transport/metadata_batch.c \
-src/core/lib/transport/static_metadata.c \
-src/core/lib/transport/transport.c \
-src/core/lib/transport/transport_op_string.c \
-src/core/lib/http/httpcli_security_connector.c \
-src/core/lib/security/context/security_context.c \
-src/core/lib/security/credentials/composite/composite_credentials.c \
-src/core/lib/security/credentials/credentials.c \
-src/core/lib/security/credentials/credentials_metadata.c \
-src/core/lib/security/credentials/fake/fake_credentials.c \
-src/core/lib/security/credentials/google_default/credentials_posix.c \
-src/core/lib/security/credentials/google_default/credentials_windows.c \
-src/core/lib/security/credentials/google_default/google_default_credentials.c \
-src/core/lib/security/credentials/iam/iam_credentials.c \
-src/core/lib/security/credentials/jwt/json_token.c \
-src/core/lib/security/credentials/jwt/jwt_credentials.c \
-src/core/lib/security/credentials/jwt/jwt_verifier.c \
-src/core/lib/security/credentials/oauth2/oauth2_credentials.c \
-src/core/lib/security/credentials/plugin/plugin_credentials.c \
-src/core/lib/security/credentials/ssl/ssl_credentials.c \
-src/core/lib/security/transport/client_auth_filter.c \
-src/core/lib/security/transport/handshake.c \
-src/core/lib/security/transport/secure_endpoint.c \
-src/core/lib/security/transport/security_connector.c \
-src/core/lib/security/transport/server_auth_filter.c \
-src/core/lib/security/transport/tsi_error.c \
-src/core/lib/security/util/b64.c \
-src/core/lib/security/util/json_util.c \
-src/core/lib/surface/init_secure.c \
-src/core/ext/transport/chttp2/alpn/alpn.c \
-src/core/lib/tsi/fake_transport_security.c \
-src/core/lib/tsi/ssl_transport_security.c \
-src/core/lib/tsi/transport_security.c \
src/cpp/codegen/codegen_init.cc
# This tag can be used to specify the character encoding of the source files
diff --git a/tools/doxygen/Doxyfile.core b/tools/doxygen/Doxyfile.core
index 53ae4e4cf4..e631c962b3 100644
--- a/tools/doxygen/Doxyfile.core
+++ b/tools/doxygen/Doxyfile.core
@@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC Core"
# could be handy for archiving the generated documentation or if some version
# control system is used.
-PROJECT_NUMBER = 0.16.0-dev
+PROJECT_NUMBER = 1.1.0-dev
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal
index b846237689..8233da957d 100644
--- a/tools/doxygen/Doxyfile.core.internal
+++ b/tools/doxygen/Doxyfile.core.internal
@@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC Core"
# could be handy for archiving the generated documentation or if some version
# control system is used.
-PROJECT_NUMBER = 0.16.0-dev
+PROJECT_NUMBER = 1.1.0-dev
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
diff --git a/tools/run_tests/artifact_targets.py b/tools/run_tests/artifact_targets.py
index bd1269ceb7..e9267be58b 100644
--- a/tools/run_tests/artifact_targets.py
+++ b/tools/run_tests/artifact_targets.py
@@ -40,7 +40,7 @@ def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
environ['RUN_COMMAND'] = shell_command
docker_args=[]
- for k,v in environ.iteritems():
+ for k,v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
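
The artifact_targets.py tweak above swaps iteritems() for items() so the dict walk works under both Python 2 and 3 (the same change recurs in distribtest_targets.py and package_targets.py below). The surrounding pattern, turning an environment dict into docker -e flags, is roughly this (standalone sketch, not the real jobspec code):

def environ_to_docker_args(environ):
    # items() exists in both Python 2 and 3; iteritems() is Python 2 only.
    docker_args = []
    for k, v in environ.items():
        docker_args += ['-e', '%s=%s' % (k, v)]
    return docker_args

print(environ_to_docker_args({'RUN_COMMAND': 'make', 'JOBS': '4'}))
# -> ['-e', 'RUN_COMMAND=make', '-e', 'JOBS=4'] (pair order follows dict order)
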
diff --git a/tools/run_tests/build_artifact_python.bat b/tools/run_tests/build_artifact_python.bat
index 7c8c2aa12d..a7b1a58284 100644
--- a/tools/run_tests/build_artifact_python.bat
+++ b/tools/run_tests/build_artifact_python.bat
@@ -34,29 +34,6 @@ pip install --upgrade six
pip install --upgrade setuptools
pip install -rrequirements.txt
-@rem Because this is windows and *everything seems to hate Windows* we have to
-@rem set all of these flags ourselves because Python won't help us (see the
-@rem setup.py of the grpcio_tools project).
-set GRPC_PYTHON_CFLAGS=-fno-wrapv -frtti -std=c++11
-
-@rem See https://sourceforge.net/p/mingw-w64/bugs/363/
-if %2 == 32 (
- set GRPC_PYTHON_CFLAGS=%GRPC_PYTHON_CFLAGS% -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s
-) else (
- set GRPC_PYTHON_CFLAGS=%GRPC_PYTHON_CFLAGS% -D_ftime=_ftime64 -D_timeb=__timeb64
-)
-
-@rem Further confusing things, MSYS2's mingw64 tries to dynamically link
-@rem libgcc, libstdc++, and winpthreads. We have to override this or our
-@rem extensions end up linking to MSYS2 DLLs, which the normal Python on
-@rem Windows user won't have... and ON TOP OF THIS, there's MinGW's GCC default
-@rem behavior of linking msvcrt.dll as the C runtime library, which we need to
-@rem override so that Python's distutils doesn't link us against multiple C
-@rem runtimes.
-python -c "from distutils.cygwinccompiler import get_msvcr; print(get_msvcr()[0])" > temp.txt
-set /p PYTHON_MSVCR=<temp.txt
-set GRPC_PYTHON_LDFLAGS=-static-libgcc -static-libstdc++ -mcrtdll=%PYTHON_MSVCR% -static -lpthread
-
set GRPC_PYTHON_BUILD_WITH_CYTHON=1
diff --git a/tools/run_tests/build_python.sh b/tools/run_tests/build_python.sh
index a3fa8200d5..9cb3cb12a9 100755
--- a/tools/run_tests/build_python.sh
+++ b/tools/run_tests/build_python.sh
@@ -127,19 +127,6 @@ if [ $(is_linux) ]; then
fi
fi
fi
-# TODO(atash) consider conceptualizing MinGW as a first-class platform and move
-# these flags into our `setup.py`s
-if [ $(is_mingw) ]; then
- # We're on MinGW, and our CFLAGS and LDFLAGS will be eaten by the void. Use
- # our work-around environment variables instead.
- PYTHON_MSVCR=`$PYTHON -c "from distutils.cygwinccompiler import get_msvcr; print(get_msvcr()[0])"`
- export GRPC_PYTHON_LDFLAGS="-static-libgcc -static-libstdc++ -mcrtdll=$PYTHON_MSVCR -static -lpthread"
- # See https://sourceforge.net/p/mingw-w64/bugs/363/
- export GRPC_PYTHON_CFLAGS="-D_ftime=_ftime64 -D_timeb=__timeb64"
- # TODO(atash) set these flags for only grpcio-tools (they don't do any harm to
- # grpcio, but they result in noisy warnings).
- export GRPC_PYTHON_CFLAGS="-frtti -std=c++11 $GRPC_PYTHON_CFLAGS"
-fi
############################
# Perform build operations #
diff --git a/tools/run_tests/distribtest_targets.py b/tools/run_tests/distribtest_targets.py
index 1a7aa0bfc8..7930f2a0a4 100644
--- a/tools/run_tests/distribtest_targets.py
+++ b/tools/run_tests/distribtest_targets.py
@@ -41,7 +41,7 @@ def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
environ['RELATIVE_COPY_PATH'] = 'test/distrib'
docker_args=[]
- for k,v in environ.iteritems():
+ for k,v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'}
diff --git a/tools/run_tests/dockerjob.py b/tools/run_tests/dockerjob.py
index e4ca3b7faa..4a7e61b3c4 100755
--- a/tools/run_tests/dockerjob.py
+++ b/tools/run_tests/dockerjob.py
@@ -29,6 +29,8 @@
"""Helpers to run docker instances as jobs."""
+from __future__ import print_function
+
import jobset
import tempfile
import time
@@ -95,7 +97,7 @@ def remove_image(image, skip_nonexistent=False, max_retries=10):
stderr=subprocess.STDOUT) == 0:
return True
time.sleep(2)
- print 'Failed to remove docker image %s' % image
+ print('Failed to remove docker image %s' % image)
return False
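dockerjob.py (and the scripts below) also pick up from __future__ import print_function, so the Python 2 print statement becomes the function form that Python 3 requires. A tiny sketch of the idiom:

    from __future__ import print_function  # no-op on Python 3, enables print() on Python 2

    image = 'grpc_interop_go'  # illustrative image name
    print('Failed to remove docker image %s' % image)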
diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py
index 3999537c40..b6fb6318e0 100755
--- a/tools/run_tests/jobset.py
+++ b/tools/run_tests/jobset.py
@@ -29,6 +29,8 @@
"""Run a group of subprocesses and then finish."""
+from __future__ import print_function
+
import multiprocessing
import os
import platform
@@ -123,8 +125,8 @@ def message(tag, msg, explanatory_text=None, do_newline=False):
try:
if platform_string() == 'windows' or not sys.stdout.isatty():
if explanatory_text:
- print explanatory_text
- print '%s: %s' % (tag, msg)
+ print(explanatory_text)
+ print('%s: %s' % (tag, msg))
return
sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
_BEGINNING_OF_LINE,
@@ -368,7 +370,7 @@ class Jobset(object):
self._travis,
self._add_env)
self._running.add(job)
- if not self.resultset.has_key(job.GetSpec().shortname):
+ if job.GetSpec().shortname not in self.resultset:
self.resultset[job.GetSpec().shortname] = []
return True
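In jobset.py the lookup switches from dict.has_key(), which was removed in Python 3, to the in operator, which both versions support. Minimal sketch with an illustrative shortname:

    resultset = {}
    shortname = 'bins/opt/end2end_test'  # illustrative
    if shortname not in resultset:
        resultset[shortname] = []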
diff --git a/tools/run_tests/package_targets.py b/tools/run_tests/package_targets.py
index 39a11a243d..ce3f08dfbc 100644
--- a/tools/run_tests/package_targets.py
+++ b/tools/run_tests/package_targets.py
@@ -39,7 +39,7 @@ def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
environ['RUN_COMMAND'] = shell_command
docker_args=[]
- for k,v in environ.iteritems():
+ for k,v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
diff --git a/tools/run_tests/perf_html_report.template b/tools/run_tests/perf_html_report.template
new file mode 100644
index 0000000000..c219fa888a
--- /dev/null
+++ b/tools/run_tests/perf_html_report.template
@@ -0,0 +1,21 @@
+<!DOCTYPE html>
+<html lang="en">
+<head><title>Performance Test Result</title></head>
+<body>
+ <h2>Performance Test Result</h2>
+ <table style="width:50%" border="1">
+ <% sorted_test_cases = sorted(resultset.keys()) %>
+ % for test_case in sorted_test_cases:
+ <tr><td bgcolor="#00BFFF" style="width:30%"><b>${test_case}</b></td>
+ <% result = resultset[test_case] %>
+ <td>
+ % for k, v in result.iteritems():
+ ${k}: ${v}<br>
+ % endfor
+ </td>
+ </tr>
+ % endfor
+ </table>
+
+</body>
+</html>
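The new perf_html_report.template is a Mako template keyed on a resultset dict of per-test-case summaries; note its body still calls result.iteritems(), so it assumes a Python 2 Mako runtime. A sketch of rendering it with the same Mako API that report_utils.py uses (the sample numbers are made up):

    from mako.runtime import Context
    from mako.template import Template

    resultset = {'cpp_protobuf_async_unary_ping_pong': {'latency50': 42.1, 'latency99': 80.5}}
    template = Template(filename='tools/run_tests/perf_html_report.template',
                        format_exceptions=True)
    with open('index.html', 'w') as out:
        template.render_context(Context(out, resultset=resultset))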
diff --git a/tools/run_tests/performance/bq_upload_result.py b/tools/run_tests/performance/bq_upload_result.py
index fbccf3bdca..2a99499843 100755
--- a/tools/run_tests/performance/bq_upload_result.py
+++ b/tools/run_tests/performance/bq_upload_result.py
@@ -118,6 +118,8 @@ def _flatten_result_inplace(scenario_result):
for stats in scenario_result['clientStats']:
stats['latencies'] = json.dumps(stats['latencies'])
scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
+ scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
+ scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
def _populate_metadata_inplace(scenario_result):
diff --git a/tools/run_tests/performance/scenario_result_schema.json b/tools/run_tests/performance/scenario_result_schema.json
index 0325414757..6bec21df39 100644
--- a/tools/run_tests/performance/scenario_result_schema.json
+++ b/tools/run_tests/performance/scenario_result_schema.json
@@ -198,5 +198,15 @@
"mode": "NULLABLE"
}
]
+ },
+ {
+ "name": "clientSuccess",
+ "type": "STRING",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "serverSuccess",
+ "type": "STRING",
+ "mode": "NULLABLE"
}
]
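clientSuccess and serverSuccess are added to the BigQuery schema as NULLABLE STRING columns, so bq_upload_result.py serializes the per-worker lists to JSON strings before upload, matching how latencies and serverCores are already flattened. Sketch with illustrative values:

    import json

    scenario_result = {'clientSuccess': [True, True], 'serverSuccess': [True]}  # illustrative
    scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
    scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])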
diff --git a/tools/run_tests/port_server.py b/tools/run_tests/port_server.py
index 83f8e6cd35..e9b3f7ff79 100755
--- a/tools/run_tests/port_server.py
+++ b/tools/run_tests/port_server.py
@@ -30,8 +30,10 @@
"""Manage TCP ports for unit tests; started by run_tests.py"""
+from __future__ import print_function
+
import argparse
-import BaseHTTPServer
+from six.moves import BaseHTTPServer
import hashlib
import os
import socket
@@ -46,7 +48,7 @@ _MY_VERSION = 9
if len(sys.argv) == 2 and sys.argv[1] == 'dump_version':
- print _MY_VERSION
+ print(_MY_VERSION)
sys.exit(0)
@@ -62,7 +64,7 @@ if args.logfile is not None:
sys.stderr = open(args.logfile, 'w')
sys.stdout = sys.stderr
-print 'port server running on port %d' % args.port
+print('port server running on port %d' % args.port)
pool = []
in_use = {}
@@ -152,7 +154,7 @@ class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
self.send_header('Content-Type', 'text/plain')
self.end_headers()
now = time.time()
- self.wfile.write(yaml.dump({'pool': pool, 'in_use': dict((k, now - v) for k, v in in_use.iteritems())}))
+ self.wfile.write(yaml.dump({'pool': pool, 'in_use': dict((k, now - v) for k, v in in_use.items())}))
elif self.path == '/quitquitquit':
self.send_response(200)
self.end_headers()
@@ -164,4 +166,4 @@ while keep_running:
httpd.handle_request()
sys.stderr.flush()
-print 'done'
+print('done')
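port_server.py now gets BaseHTTPServer through six.moves, which maps to the Python 2 module of that name and to http.server on Python 3. A self-contained sketch of the same import serving one request (handler body simplified; on Python 3 the response must be bytes):

    from six.moves import BaseHTTPServer

    class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
        def do_GET(self):
            self.send_response(200)
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()
            self.wfile.write(b'ok')

    httpd = BaseHTTPServer.HTTPServer(('', 0), Handler)
    # httpd.handle_request()  # serve a single request, as port_server.py's loop does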
diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py
index 35dcaca3d3..7188d3dcd7 100644
--- a/tools/run_tests/report_utils.py
+++ b/tools/run_tests/report_utils.py
@@ -29,12 +29,16 @@
"""Generate XML and HTML test reports."""
+from __future__ import print_function
+
try:
from mako.runtime import Context
from mako.template import Template
from mako import exceptions
except (ImportError):
pass # Mako not installed but it is ok.
+import glob
+import json
import os
import string
import xml.etree.cElementTree as ET
@@ -60,7 +64,7 @@ def render_junit_xml_report(resultset, xml_report):
root = ET.Element('testsuites')
testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc',
name='tests')
- for shortname, results in resultset.iteritems():
+ for shortname, results in resultset.items():
for result in results:
xml_test = ET.SubElement(testsuite, 'testcase', name=shortname)
if result.elapsed_time:
@@ -83,10 +87,10 @@ def render_interop_html_report(
try:
mytemplate = Template(filename=template_file, format_exceptions=True)
except NameError:
- print 'Mako template is not installed. Skipping HTML report generation.'
+ print('Mako template is not installed. Skipping HTML report generation.')
return
except IOError as e:
- print 'Failed to find the template %s: %s' % (template_file, e)
+ print('Failed to find the template %s: %s' % (template_file, e))
return
sorted_test_cases = sorted(test_cases)
@@ -118,3 +122,38 @@ def render_interop_html_report(
print(exceptions.text_error_template().render())
raise
+
+def render_perf_html_report(report_dir):
+ """Generate a simple HTML report for the perf tests."""
+ template_file = 'tools/run_tests/perf_html_report.template'
+ try:
+ mytemplate = Template(filename=template_file, format_exceptions=True)
+ except NameError:
+ print('Mako template is not installed. Skipping HTML report generation.')
+ return
+ except IOError as e:
+ print('Failed to find the template %s: %s' % (template_file, e))
+ return
+
+ resultset = {}
+ for result_file in glob.glob(os.path.join(report_dir, '*.json')):
+ with open(result_file, 'r') as f:
+ scenario_result = json.loads(f.read())
+ test_case = scenario_result['scenario']['name']
+ if 'ping_pong' in test_case:
+ latency50 = round(scenario_result['summary']['latency50'], 2)
+ latency99 = round(scenario_result['summary']['latency99'], 2)
+ summary = {'latency50': latency50, 'latency99': latency99}
+ else:
+ summary = {'qps': round(scenario_result['summary']['qps'], 2)}
+ resultset[test_case] = summary
+
+ args = {'resultset': resultset}
+
+ html_file_path = os.path.join(report_dir, 'index.html')
+ try:
+ with open(html_file_path, 'w') as output_file:
+ mytemplate.render_context(Context(output_file, **args))
+ except:
+ print(exceptions.text_error_template().render())
+ raise
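The new render_perf_html_report() walks report_dir for *.json files and expects each one to carry the scenario name plus a summary with latency or qps fields. A sketch of a file it can consume (field names taken from the function above, numbers are placeholders; the perf_reports path matches the layout used by run_performance_tests.py further below):

    import json
    import os

    if not os.path.isdir('perf_reports'):
        os.makedirs('perf_reports')
    sample = {
        'scenario': {'name': 'cpp_protobuf_async_unary_ping_pong'},
        'summary': {'latency50': 123.4, 'latency99': 567.8, 'qps': 9000.0},
    }
    with open('perf_reports/cpp_protobuf_async_unary_ping_pong-scenario_result.json', 'w') as f:
        json.dump(sample, f)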
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 13a4a49325..2e5a2f7721 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -30,6 +30,8 @@
"""Run interop (cross-language) tests in parallel."""
+from __future__ import print_function
+
import argparse
import atexit
import dockerjob
@@ -286,7 +288,7 @@ class RubyLanguage:
return {}
def unimplemented_test_cases(self):
- return _SKIP_ADVANCED + _SKIP_COMPRESSION
+ return _SKIP_ADVANCED + _SKIP_SERVER_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_ADVANCED + _SKIP_COMPRESSION
@@ -372,7 +374,7 @@ def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
# turn environ into -e docker args
if environ:
- for k,v in environ.iteritems():
+ for k,v in environ.items():
docker_cmdline += ['-e', '%s=%s' % (k,v)]
# set working directory
@@ -674,15 +676,15 @@ servers = set(s for s in itertools.chain.from_iterable(_SERVERS
if args.use_docker:
if not args.travis:
- print 'Seen --use_docker flag, will run interop tests under docker.'
- print
- print 'IMPORTANT: The changes you are testing need to be locally committed'
- print 'because only the committed changes in the current branch will be'
- print 'copied to the docker environment.'
+ print('Seen --use_docker flag, will run interop tests under docker.')
+ print('')
+ print('IMPORTANT: The changes you are testing need to be locally committed')
+ print('because only the committed changes in the current branch will be')
+ print('copied to the docker environment.')
time.sleep(5)
if not args.use_docker and servers:
- print 'Running interop servers is only supported with --use_docker option enabled.'
+ print('Running interop servers is only supported with --use_docker option enabled.')
sys.exit(1)
languages = set(_LANGUAGES[l]
@@ -768,7 +770,7 @@ try:
(server_host, server_port) = server[1].split(':')
server_addresses[server_name] = (server_host, server_port)
- for server_name, server_address in server_addresses.iteritems():
+ for server_name, server_address in server_addresses.items():
(server_host, server_port) = server_address
server_language = _LANGUAGES.get(server_name, None)
skip_server = [] # test cases unimplemented by server
@@ -800,7 +802,7 @@ try:
jobs.append(test_job)
if not jobs:
- print 'No jobs to run.'
+ print('No jobs to run.')
for image in docker_images.itervalues():
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
@@ -814,7 +816,7 @@ try:
report_utils.render_junit_xml_report(resultset, 'report.xml')
- for name, job in resultset.iteritems():
+ for name, job in resultset.items():
if "http2" in name:
job[0].http2results = aggregate_http2_results(job[0].message)
@@ -826,12 +828,12 @@ try:
finally:
# Check if servers are still running.
- for server, job in server_jobs.iteritems():
+ for server, job in server_jobs.items():
if not job.is_running():
- print 'Server "%s" has exited prematurely.' % server
+ print('Server "%s" has exited prematurely.' % server)
dockerjob.finish_jobs([j for j in server_jobs.itervalues()])
for image in docker_images.itervalues():
- print 'Removing docker image %s' % image
+ print('Removing docker image %s' % image)
dockerjob.remove_image(image)
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 14901caf07..5ff9696808 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -30,6 +30,8 @@
"""Run performance tests locally or remotely."""
+from __future__ import print_function
+
import argparse
import itertools
import jobset
@@ -38,6 +40,7 @@ import multiprocessing
import os
import pipes
import re
+import report_utils
import subprocess
import sys
import tempfile
@@ -52,6 +55,7 @@ os.chdir(_ROOT)
_REMOTE_HOST_USERNAME = 'jenkins'
+_REPORT_DIR = 'perf_reports'
class QpsWorkerJob:
@@ -101,7 +105,11 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
cmd += 'tools/run_tests/performance/run_qps_driver.sh '
cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
- cmd += '--scenario_result_file=scenario_result.json'
+ if not os.path.isdir(_REPORT_DIR):
+ os.makedirs(_REPORT_DIR)
+ report_path = os.path.join(_REPORT_DIR,
+ '%s-scenario_result.json' % scenario_json['name'])
+ cmd += '--scenario_result_file=%s' % report_path
if remote_host:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
@@ -310,7 +318,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
'in the same scenario')
if custom_server_lang:
if not workers_by_lang.get(custom_server_lang, []):
- print 'Warning: Skipping scenario %s as' % scenario_json['name']
+ print('Warning: Skipping scenario %s as' % scenario_json['name'])
print('SERVER_LANGUAGE is set to %s yet the language has '
'not been selected with -l' % custom_server_lang)
continue
@@ -319,7 +327,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
workers[idx] = workers_by_lang[custom_server_lang][idx]
if custom_client_lang:
if not workers_by_lang.get(custom_client_lang, []):
- print 'Warning: Skipping scenario %s as' % scenario_json['name']
+ print('Warning: Skipping scenario %s as' % scenario_json['name'])
print('CLIENT_LANGUAGE is set to %s yet the language has '
'not been selected with -l' % custom_client_lang)
continue
@@ -344,14 +352,14 @@ def finish_qps_workers(jobs):
while any(job.is_running() for job in jobs):
for job in qpsworker_jobs:
if job.is_running():
- print 'QPS worker "%s" is still running.' % job.host_and_port
+ print('QPS worker "%s" is still running.' % job.host_and_port)
if retries > 10:
- print 'Killing all QPS workers.'
+ print('Killing all QPS workers.')
for job in jobs:
job.kill()
retries += 1
time.sleep(3)
- print 'All QPS workers finished.'
+ print('All QPS workers finished.')
argp = argparse.ArgumentParser(description='Run performance tests.')
@@ -434,6 +442,9 @@ try:
jobset.message('START', 'Running scenarios.', do_newline=True)
num_failures, _ = jobset.run(
scenarios, newline_on_success=True, maxjobs=1)
+
+ report_utils.render_perf_html_report(_REPORT_DIR)
+
if num_failures == 0:
jobset.message('SUCCESS',
'All scenarios finished successfully.',
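run_performance_tests.py now writes each scenario's result to its own file under perf_reports/ and renders the HTML index from that directory once all scenarios finish. A minimal sketch of the path construction introduced above (the scenario name is illustrative):

    import os

    _REPORT_DIR = 'perf_reports'

    def scenario_result_path(scenario_name):
        # one JSON file per scenario, consumed later by render_perf_html_report()
        if not os.path.isdir(_REPORT_DIR):
            os.makedirs(_REPORT_DIR)
        return os.path.join(_REPORT_DIR, '%s-scenario_result.json' % scenario_name)

    # e.g. '--scenario_result_file=%s' % scenario_result_path('cpp_generic_async_streaming_ping_pong_secure')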
diff --git a/tools/run_tests/run_stress_tests.py b/tools/run_tests/run_stress_tests.py
index e42ee24ffb..de4a22877c 100755
--- a/tools/run_tests/run_stress_tests.py
+++ b/tools/run_tests/run_stress_tests.py
@@ -29,6 +29,8 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run stress test in C++"""
+from __future__ import print_function
+
import argparse
import atexit
import dockerjob
@@ -93,7 +95,7 @@ def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
# turn environ into -e docker args
if environ:
- for k, v in environ.iteritems():
+ for k, v in environ.items():
docker_cmdline += ['-e', '%s=%s' % (k, v)]
# set working directory
@@ -140,7 +142,7 @@ def cloud_to_cloud_jobspec(language,
'--num_channels_per_server=%s' % num_channels_per_server,
'--metrics_port=%s' % metrics_port
]))
- print cmdline
+ print(cmdline)
cwd = language.client_cwd
environ = language.global_env()
if docker_image:
@@ -287,7 +289,7 @@ try:
(server_host, server_port) = server[1].split(':')
server_addresses[server_name] = (server_host, server_port)
- for server_name, server_address in server_addresses.iteritems():
+ for server_name, server_address in server_addresses.items():
(server_host, server_port) = server_address
for language in languages:
test_job = cloud_to_cloud_jobspec(
@@ -302,7 +304,7 @@ try:
jobs.append(test_job)
if not jobs:
- print 'No jobs to run.'
+ print('No jobs to run.')
for image in docker_images.itervalues():
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
@@ -317,12 +319,12 @@ try:
finally:
# Check if servers are still running.
- for server, job in server_jobs.iteritems():
+ for server, job in server_jobs.items():
if not job.is_running():
- print 'Server "%s" has exited prematurely.' % server
+ print('Server "%s" has exited prematurely.' % server)
dockerjob.finish_jobs([j for j in server_jobs.itervalues()])
for image in docker_images.itervalues():
- print 'Removing docker image %s' % image
+ print('Removing docker image %s' % image)
dockerjob.remove_image(image)
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index b0e20698bd..57fff2ec9c 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -30,6 +30,8 @@
"""Run tests in parallel."""
+from __future__ import print_function
+
import argparse
import ast
import collections
@@ -39,6 +41,7 @@ import json
import multiprocessing
import os
import os.path
+import pipes
import platform
import random
import re
@@ -48,7 +51,7 @@ import sys
import tempfile
import traceback
import time
-import urllib2
+from six.moves import urllib
import uuid
import jobset
@@ -72,6 +75,9 @@ def platform_string():
return jobset.platform_string()
+_DEFAULT_TIMEOUT_SECONDS = 5 * 60
+
+
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
@@ -84,7 +90,7 @@ class Config(object):
self.tool_prefix = tool_prefix
self.timeout_multiplier = timeout_multiplier
- def job_spec(self, cmdline, timeout_seconds=5*60,
+ def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
shortname=None, environ={}, cpu_cost=1.0, flaky=False):
"""Construct a jobset.JobSpec for a test under this config
@@ -93,7 +99,7 @@ class Config(object):
would like to run
"""
actual_environ = self.environ.copy()
- for k, v in environ.iteritems():
+ for k, v in environ.items():
actual_environ[k] = v
return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
shortname=shortname,
@@ -159,7 +165,7 @@ class CLanguage(object):
env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
_ROOT + '/src/core/lib/tsi/test_creds/ca.pem',
'GRPC_POLL_STRATEGY': polling_strategy}
- shortname_ext = '' if polling_strategy=='all' else ' polling=%s' % polling_strategy
+ shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
if self.config.build_config in target['exclude_configs']:
continue
if self.platform == 'windows':
@@ -190,28 +196,26 @@ class CLanguage(object):
assert line[1] == ' '
test = base + line.strip()
cmdline = [binary] + ['--gtest_filter=%s' % test]
- out.append(self.config.job_spec(cmdline, [binary],
- shortname='%s:%s %s' % (binary, test, shortname_ext),
+ out.append(self.config.job_spec(cmdline,
+ shortname='%s --gtest_filter=%s %s' % (binary, test, shortname_ext),
cpu_cost=target['cpu_cost'],
environ=env))
else:
cmdline = [binary] + target['args']
- out.append(self.config.job_spec(cmdline, [binary],
- shortname=' '.join(cmdline) + shortname_ext,
+ out.append(self.config.job_spec(cmdline,
+ shortname=' '.join(
+ pipes.quote(arg)
+ for arg in cmdline) +
+ shortname_ext,
cpu_cost=target['cpu_cost'],
flaky=target.get('flaky', False),
+ timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS),
environ=env))
elif self.args.regex == '.*' or self.platform == 'windows':
- print '\nWARNING: binary not found, skipping', binary
+ print('\nWARNING: binary not found, skipping', binary)
return sorted(out)
def make_targets(self):
- test_regex = self.args.regex
- if self.platform != 'windows' and self.args.regex != '.*':
- # use the regex to minimize the number of things to build
- return [os.path.basename(target['name'])
- for target in get_c_tests(False, self.test_lang)
- if re.search(test_regex, '/' + target['name'])]
if self.platform == 'windows':
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
@@ -393,7 +397,7 @@ class PythonLanguage(object):
return [self.config.job_spec(
config.run,
timeout_seconds=5*60,
- environ=dict(environment.items() +
+ environ=dict(list(environment.items()) +
[('GRPC_PYTHON_TESTRUNNER_FILTER', suite_name)]),
shortname='%s.test.%s' % (config.name, suite_name),)
for suite_name in tests_json
@@ -418,7 +422,10 @@ class PythonLanguage(object):
return 'Makefile'
def dockerfile_dir(self):
- return 'tools/dockerfile/test/python_jessie_%s' % _docker_arch_suffix(self.args.arch)
+ return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch))
+
+ def python_manager_name(self):
+ return 'pyenv' if self.args.compiler in ['python3.5', 'python3.6'] else 'jessie'
def _get_pythons(self, args):
if args.arch == 'x86':
@@ -453,6 +460,8 @@ class PythonLanguage(object):
shell + runner + [os.path.join(name, venv_relative_python[0])])
python27_config = python_config_generator(name='py27', major='2', minor='7', bits=bits)
python34_config = python_config_generator(name='py34', major='3', minor='4', bits=bits)
+ python35_config = python_config_generator(name='py35', major='3', minor='5', bits=bits)
+ python36_config = python_config_generator(name='py36', major='3', minor='6', bits=bits)
if args.compiler == 'default':
if os.name == 'nt':
return (python27_config,)
@@ -462,6 +471,10 @@ class PythonLanguage(object):
return (python27_config,)
elif args.compiler == 'python3.4':
return (python34_config,)
+ elif args.compiler == 'python3.5':
+ return (python35_config,)
+ elif args.compiler == 'python3.6':
+ return (python36_config,)
else:
raise Exception('Compiler %s not supported.' % args.compiler)
@@ -662,7 +675,7 @@ class ObjCLanguage(object):
return []
def make_targets(self):
- return ['grpc_objective_c_plugin', 'interop_server']
+ return ['interop_server']
def make_options(self):
return []
@@ -755,7 +768,7 @@ def _windows_arch_option(arch):
elif arch == 'x64':
return '/p:Platform=x64'
else:
- print 'Architecture %s not supported.' % arch
+ print('Architecture %s not supported.' % arch)
sys.exit(1)
@@ -773,11 +786,11 @@ def _check_arch_option(arch):
elif runtime_arch == '32bit' and arch == 'x86':
return
else:
- print 'Architecture %s does not match current runtime architecture.' % arch
+ print('Architecture %s does not match current runtime architecture.' % arch)
sys.exit(1)
else:
if args.arch != 'default':
- print 'Architecture %s not supported on current platform.' % args.arch
+ print('Architecture %s not supported on current platform.' % args.arch)
sys.exit(1)
@@ -791,7 +804,7 @@ def _windows_build_bat(compiler):
elif compiler == 'vs2010':
return 'vsprojects\\build_vs2010.bat'
else:
- print 'Compiler %s not supported.' % compiler
+ print('Compiler %s not supported.' % compiler)
sys.exit(1)
@@ -805,7 +818,7 @@ def _windows_toolset_option(compiler):
elif compiler == 'vs2010':
return '/p:PlatformToolset=v100'
else:
- print 'Compiler %s not supported.' % compiler
+ print('Compiler %s not supported.' % compiler)
sys.exit(1)
@@ -816,7 +829,7 @@ def _docker_arch_suffix(arch):
elif arch == 'x86':
return 'x86'
else:
- print 'Architecture %s not supported with current settings.' % arch
+ print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
@@ -893,7 +906,7 @@ argp.add_argument('--compiler',
'gcc4.4', 'gcc4.6', 'gcc4.9', 'gcc5.3',
'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
'vs2010', 'vs2013', 'vs2015',
- 'python2.7', 'python3.4',
+ 'python2.7', 'python3.4', 'python3.5', 'python3.6',
'node0.12', 'node4', 'node5',
'coreclr'],
default='default',
@@ -932,7 +945,7 @@ for spec in args.update_submodules:
branch = spec[1]
cwd = 'third_party/%s' % submodule
def git(cmd, cwd=cwd):
- print 'in %s: git %s' % (cwd, cmd)
+ print('in %s: git %s' % (cwd, cmd))
subprocess.check_call('git %s' % cmd, cwd=cwd, shell=True)
git('fetch')
git('checkout %s' % branch)
@@ -943,8 +956,8 @@ if need_to_regenerate_projects:
if jobset.platform_string() == 'linux':
subprocess.check_call('tools/buildgen/generate_projects.sh', shell=True)
else:
- print 'WARNING: may need to regenerate projects, but since we are not on'
- print ' Linux this step is being skipped. Compilation MAY fail.'
+ print('WARNING: may need to regenerate projects, but since we are not on')
+ print(' Linux this step is being skipped. Compilation MAY fail.')
# grab config
@@ -971,18 +984,18 @@ for l in languages:
language_make_options=[]
if any(language.make_options() for language in languages):
if not 'gcov' in args.config and len(languages) != 1:
- print 'languages with custom make options cannot be built simultaneously with other languages'
+ print('languages with custom make options cannot be built simultaneously with other languages')
sys.exit(1)
else:
language_make_options = next(iter(languages)).make_options()
if args.use_docker:
if not args.travis:
- print 'Seen --use_docker flag, will run tests under docker.'
- print
- print 'IMPORTANT: The changes you are testing need to be locally committed'
- print 'because only the committed changes in the current branch will be'
- print 'copied to the docker environment.'
+ print('Seen --use_docker flag, will run tests under docker.')
+ print('')
+ print('IMPORTANT: The changes you are testing need to be locally committed')
+ print('because only the committed changes in the current branch will be')
+ print('copied to the docker environment.')
time.sleep(5)
dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
@@ -1066,7 +1079,7 @@ build_steps = list(set(
for l in languages
for cmdline in l.pre_build_steps()))
if make_targets:
- make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.iteritems())
+ make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items())
build_steps.extend(set(make_commands))
build_steps.extend(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
@@ -1083,6 +1096,18 @@ forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
try:
+ version = int(urllib.request.urlopen(
+ 'http://localhost:%d/version_number' % legacy_server_port,
+ timeout=10).read())
+ except:
+ pass
+ else:
+ urllib.request.urlopen(
+ 'http://localhost:%d/quitquitquit' % legacy_server_port).read()
+
+
+def _shut_down_legacy_server(legacy_server_port):
+ try:
version = int(urllib2.urlopen(
'http://localhost:%d/version_number' % legacy_server_port,
timeout=10).read())
@@ -1099,29 +1124,29 @@ def _start_port_server(port_server_port):
# if not running ==> start a new one
# otherwise, leave it up
try:
- version = int(urllib2.urlopen(
+ version = int(urllib.request.urlopen(
'http://localhost:%d/version_number' % port_server_port,
timeout=10).read())
- print 'detected port server running version %d' % version
+ print('detected port server running version %d' % version)
running = True
except Exception as e:
- print 'failed to detect port server: %s' % sys.exc_info()[0]
- print e.strerror
+ print('failed to detect port server: %s' % sys.exc_info()[0])
+ print(e.strerror)
running = False
if running:
current_version = int(subprocess.check_output(
[sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
'dump_version']))
- print 'my port server is version %d' % current_version
+ print('my port server is version %d' % current_version)
running = (version >= current_version)
if not running:
- print 'port_server version mismatch: killing the old one'
- urllib2.urlopen('http://localhost:%d/quitquitquit' % port_server_port).read()
+ print('port_server version mismatch: killing the old one')
+ urllib.request.urlopen('http://localhost:%d/quitquitquit' % port_server_port).read()
time.sleep(1)
if not running:
fd, logfile = tempfile.mkstemp()
os.close(fd)
- print 'starting port_server, with log file %s' % logfile
+ print('starting port_server, with log file %s' % logfile)
args = [sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
'-p', '%d' % port_server_port, '-l', logfile]
env = dict(os.environ)
@@ -1147,34 +1172,34 @@ def _start_port_server(port_server_port):
waits = 0
while True:
if waits > 10:
- print 'killing port server due to excessive start up waits'
+ print('killing port server due to excessive start up waits')
port_server.kill()
if port_server.poll() is not None:
- print 'port_server failed to start'
+ print('port_server failed to start')
# try one final time: maybe another build managed to start one
time.sleep(1)
try:
- urllib2.urlopen('http://localhost:%d/get' % port_server_port,
+ urllib.request.urlopen('http://localhost:%d/get' % port_server_port,
timeout=1).read()
- print 'last ditch attempt to contact port server succeeded'
+ print('last ditch attempt to contact port server succeeded')
break
except:
traceback.print_exc()
port_log = open(logfile, 'r').read()
- print port_log
+ print(port_log)
sys.exit(1)
try:
- urllib2.urlopen('http://localhost:%d/get' % port_server_port,
+ urllib.request.urlopen('http://localhost:%d/get' % port_server_port,
timeout=1).read()
- print 'port server is up and ready'
+ print('port server is up and ready')
break
except socket.timeout:
- print 'waiting for port_server: timeout'
+ print('waiting for port_server: timeout')
traceback.print_exc();
time.sleep(1)
waits += 1
- except urllib2.URLError:
- print 'waiting for port_server: urlerror'
+ except urllib.error.URLError:
+ print('waiting for port_server: urlerror')
traceback.print_exc();
time.sleep(1)
waits += 1
@@ -1271,8 +1296,6 @@ def _build_and_run(
jobset.message(
'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
do_newline=True)
- else:
- jobset.message('PASSED', k, do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
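run_tests.py replaces urllib2 with six.moves.urllib, whose request and error submodules resolve to urllib2 on Python 2 and to urllib.request / urllib.error on Python 3. Note that the hunk at "forever = args.forever" leaves two back-to-back definitions of _shut_down_legacy_server; the later, urllib2-based one is the definition Python keeps. A consolidated sketch using only the portable import (same body as the version added in that hunk):

    from six.moves import urllib

    def _shut_down_legacy_server(legacy_server_port):
        try:
            version = int(urllib.request.urlopen(
                'http://localhost:%d/version_number' % legacy_server_port,
                timeout=10).read())
        except:
            pass
        else:
            urllib.request.urlopen(
                'http://localhost:%d/quitquitquit' % legacy_server_port).read()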
diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json
index 3ebb445b8a..e3cfd55cd6 100644
--- a/tools/run_tests/sources_and_headers.json
+++ b/tools/run_tests/sources_and_headers.json
@@ -1823,22 +1823,6 @@
"gpr",
"gpr_test_util",
"grpc",
- "grpc_test_util"
- ],
- "headers": [],
- "language": "c",
- "name": "workqueue_test",
- "src": [
- "test/core/iomgr/workqueue_test.c"
- ],
- "third_party": false,
- "type": "target"
- },
- {
- "deps": [
- "gpr",
- "gpr_test_util",
- "grpc",
"grpc++",
"grpc++_test_util",
"grpc_test_util"
@@ -4345,7 +4329,6 @@
},
{
"deps": [
- "gpr",
"grpc",
"grpc++_base",
"grpc++_codegen_base",
@@ -4459,6 +4442,7 @@
{
"deps": [
"gpr",
+ "grpc",
"grpc++_base",
"grpc++_codegen_base",
"grpc++_codegen_base_src",
@@ -6515,10 +6499,8 @@
},
{
"deps": [
- "gpr",
- "grpc++_codegen_base",
- "grpc_base",
- "grpc_secure"
+ "grpc",
+ "grpc++_codegen_base"
],
"headers": [
"include/grpc++/alarm.h",
diff --git a/tools/run_tests/task_runner.py b/tools/run_tests/task_runner.py
index b42aa17cbb..2e3fa443b9 100755
--- a/tools/run_tests/task_runner.py
+++ b/tools/run_tests/task_runner.py
@@ -30,6 +30,8 @@
"""Runs selected gRPC test/build tasks."""
+from __future__ import print_function
+
import argparse
import atexit
import jobset
@@ -111,7 +113,7 @@ build_jobs = []
for target in targets:
build_jobs.append(target.build_jobspec())
if not build_jobs:
- print 'Nothing to build.'
+ print('Nothing to build.')
sys.exit(1)
jobset.message('START', 'Building targets.', do_newline=True)
diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json
index 93d42e3454..d94301b946 100644
--- a/tools/run_tests/tests.json
+++ b/tools/run_tests/tests.json
@@ -1940,25 +1940,6 @@
"ci_platforms": [
"linux",
"mac",
- "posix"
- ],
- "cpu_cost": 1.0,
- "exclude_configs": [],
- "flaky": false,
- "gtest": false,
- "language": "c",
- "name": "workqueue_test",
- "platforms": [
- "linux",
- "mac",
- "posix"
- ]
- },
- {
- "args": [],
- "ci_platforms": [
- "linux",
- "mac",
"posix",
"windows"
],
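The remaining tests.json hunks switch every json_run_localhost benchmark from a shell-quoted --scenario_json blob to an unquoted --scenarios_json envelope ({"scenarios": [...]}), zero out warmup_seconds, and attach "timeout_seconds": 180. A sketch of how one such entry can be built (the envelope and timeout come from the hunks below; tests.json is normally generated, so the helper name is illustrative):

    import json

    def json_run_localhost_entry(scenario):
        # scenario is a plain dict like the ones embedded in the hunks below
        return {
            'args': ['--scenarios_json', json.dumps({'scenarios': [scenario]})],
            'shortname': 'json_run_localhost:%s' % scenario['name'],
            'timeout_seconds': 180,
        }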
@@ -27153,8 +27134,8 @@
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_generic_async_streaming_ping_pong_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27175,12 +27156,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_generic_async_streaming_ping_pong_secure"
+ "shortname": "json_run_localhost:cpp_generic_async_streaming_ping_pong_secure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_protobuf_async_streaming_ping_pong_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27201,12 +27183,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_protobuf_async_streaming_ping_pong_secure"
+ "shortname": "json_run_localhost:cpp_protobuf_async_streaming_ping_pong_secure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_protobuf_async_unary_ping_pong_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27227,12 +27210,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_protobuf_async_unary_ping_pong_secure"
+ "shortname": "json_run_localhost:cpp_protobuf_async_unary_ping_pong_secure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_protobuf_sync_unary_ping_pong_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"SYNC_SERVER\"}, \"client_config\": {\"client_type\": \"SYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_unary_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"SYNC_SERVER\"}, \"client_config\": {\"client_type\": \"SYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27253,12 +27237,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_protobuf_sync_unary_ping_pong_secure"
+ "shortname": "json_run_localhost:cpp_protobuf_sync_unary_ping_pong_secure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27279,12 +27264,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_secure"
+ "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_secure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27305,12 +27291,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_secure"
+ "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_secure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27331,12 +27318,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_secure"
+ "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_secure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27357,12 +27345,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_secure"
+ "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_secure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_generic_async_streaming_ping_pong_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27383,12 +27372,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_generic_async_streaming_ping_pong_insecure"
+ "shortname": "json_run_localhost:cpp_generic_async_streaming_ping_pong_insecure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_protobuf_async_streaming_ping_pong_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27409,12 +27399,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_protobuf_async_streaming_ping_pong_insecure"
+ "shortname": "json_run_localhost:cpp_protobuf_async_streaming_ping_pong_insecure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_protobuf_async_unary_ping_pong_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27435,12 +27426,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_protobuf_async_unary_ping_pong_insecure"
+ "shortname": "json_run_localhost:cpp_protobuf_async_unary_ping_pong_insecure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_protobuf_sync_unary_ping_pong_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"server_type\": \"SYNC_SERVER\"}, \"client_config\": {\"client_type\": \"SYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_unary_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"server_type\": \"SYNC_SERVER\"}, \"client_config\": {\"client_type\": \"SYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27461,12 +27453,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_protobuf_sync_unary_ping_pong_insecure"
+ "shortname": "json_run_localhost:cpp_protobuf_sync_unary_ping_pong_insecure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27487,12 +27480,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_insecure"
+ "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_insecure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27513,12 +27507,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_insecure"
+ "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_insecure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27539,12 +27534,13 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_insecure"
+ "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_insecure",
+ "timeout_seconds": 180
},
{
"args": [
- "--scenario_json",
- "'{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
+ "--scenarios_json",
+ "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}"
],
"boringssl": true,
"ci_platforms": [
@@ -27565,7 +27561,8 @@
"posix",
"windows"
],
- "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_insecure"
+ "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_insecure",
+ "timeout_seconds": 180
},
{
"args": [
diff --git a/vsprojects/grpc.sln b/vsprojects/grpc.sln
index 8fccc646e5..84720914b0 100644
--- a/vsprojects/grpc.sln
+++ b/vsprojects/grpc.sln
@@ -49,7 +49,6 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc++", "vcxproj\.\grpc++\
EndProjectSection
ProjectSection(ProjectDependencies) = postProject
{29D16885-7228-4C31-81ED-5F9187C7F2A9} = {29D16885-7228-4C31-81ED-5F9187C7F2A9}
- {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc++_reflection", "vcxproj\.\grpc++_reflection\grpc++_reflection.vcxproj", "{5F575402-3F89-5D1A-6910-9DB8BF5D2BAB}"
@@ -67,6 +66,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc++_unsecure", "vcxproj\
ProjectSection(ProjectDependencies) = postProject
{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
{46CEDFFF-9692-456A-AA24-38B5D6BCF4C5} = {46CEDFFF-9692-456A-AA24-38B5D6BCF4C5}
+ {29D16885-7228-4C31-81ED-5F9187C7F2A9} = {29D16885-7228-4C31-81ED-5F9187C7F2A9}
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc_create_jwt", "vcxproj\.\grpc_create_jwt\grpc_create_jwt.vcxproj", "{77971F8D-F583-3E77-0E3C-6C1FB6B1749C}"
diff --git a/vsprojects/vcxproj/grpc++/grpc++.vcxproj b/vsprojects/vcxproj/grpc++/grpc++.vcxproj
index a2711ca7a4..cb9e41ea22 100644
--- a/vsprojects/vcxproj/grpc++/grpc++.vcxproj
+++ b/vsprojects/vcxproj/grpc++/grpc++.vcxproj
@@ -356,14 +356,6 @@
<ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\sync_posix.h" />
<ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\sync_windows.h" />
<ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\time.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\byte_buffer.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\byte_buffer_reader.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\compression.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\status.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc_security.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc_security_constants.h" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="$(SolutionDir)\..\include\grpc++\impl\codegen\core_codegen.h" />
@@ -373,109 +365,6 @@
<ClInclude Include="$(SolutionDir)\..\src\cpp\client\create_channel_internal.h" />
<ClInclude Include="$(SolutionDir)\..\src\cpp\server\dynamic_thread_pool.h" />
<ClInclude Include="$(SolutionDir)\..\src\cpp\server\thread_pool_interface.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\channel_args.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack_builder.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\compress_filter.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\connected_channel.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\context.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\http_client_filter.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\http_server_filter.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\compression\algorithm_metadata.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\compression\message_compress.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\debug\trace.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\http\format_request.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\http\httpcli.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\http\parser.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\closure.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\error.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\exec_ctx.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\executor.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iocp_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_internal.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\resolve_address.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_utils.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_client.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_server.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\time_averaged_stats.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\timer.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\timer_heap.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\udp_server.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\unix_sockets_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_pipe.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_common.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_reader.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_writer.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\api_trace.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\call.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\call_test_only.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\channel.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\channel_init.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\channel_stack_type.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\completion_queue.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\event_string.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\init.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\lame_client.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\server.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\connectivity_state.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\metadata.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\metadata_batch.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\static_metadata.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\transport.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\transport_impl.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\context\security_context.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\composite\composite_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\fake\fake_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\google_default_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\iam\iam_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\json_token.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_verifier.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\oauth2\oauth2_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\plugin\plugin_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\ssl\ssl_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\auth_filters.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\handshake.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\secure_endpoint.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\security_connector.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\tsi_error.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\util\b64.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\util\json_util.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\ext\transport\chttp2\alpn\alpn.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\fake_transport_security.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\ssl_transport_security.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\ssl_types.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\transport_security.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\transport_security_interface.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="$(SolutionDir)\..\src\cpp\client\secure_credentials.cc">
@@ -542,238 +431,6 @@
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\cpp\util\time.cc">
</ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\channel_args.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack_builder.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\compress_filter.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\connected_channel.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\http_client_filter.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\http_server_filter.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\compression\compression.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\compression\message_compress.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\debug\trace.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\format_request.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\httpcli.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\parser.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\closure.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\error.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\exec_ctx.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\executor.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iocp_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\resolve_address_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\resolve_address_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_utils.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_common_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_linux.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_client_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_client_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_server_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_server_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\time_averaged_stats.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\timer.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\timer_heap.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\udp_server.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\unix_sockets_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\unix_sockets_posix_noop.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_eventfd.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_nospecial.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_pipe.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_reader.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_string.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_writer.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\alarm.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\api_trace.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\byte_buffer.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\byte_buffer_reader.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\call.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\call_details.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\call_log_batch.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel_init.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel_ping.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel_stack_type.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\completion_queue.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\event_string.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\lame_client.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\metadata_array.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\server.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\validate_metadata.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\version.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\connectivity_state.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\metadata.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\metadata_batch.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\static_metadata.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\transport.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\transport_op_string.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\httpcli_security_connector.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\context\security_context.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\composite\composite_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\credentials_metadata.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\fake\fake_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\credentials_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\credentials_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\google_default_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\iam\iam_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\json_token.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_verifier.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\oauth2\oauth2_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\plugin\plugin_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\ssl\ssl_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\client_auth_filter.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\handshake.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\secure_endpoint.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\security_connector.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\server_auth_filter.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\tsi_error.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\util\b64.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\util\json_util.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\init_secure.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\ext\transport\chttp2\alpn\alpn.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\tsi\fake_transport_security.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\tsi\ssl_transport_security.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\tsi\transport_security.c">
- </ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\cpp\codegen\codegen_init.cc">
</ClCompile>
</ItemGroup>
@@ -781,9 +438,6 @@
<ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc\grpc.vcxproj">
<Project>{29D16885-7228-4C31-81ED-5F9187C7F2A9}</Project>
</ProjectReference>
- <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\gpr\gpr.vcxproj">
- <Project>{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}</Project>
- </ProjectReference>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
diff --git a/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters b/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters
index f478ac9839..a9051182b3 100644
--- a/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters
+++ b/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters
@@ -97,354 +97,6 @@
<ClCompile Include="$(SolutionDir)\..\src\cpp\util\time.cc">
<Filter>src\cpp\util</Filter>
</ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\channel_args.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack_builder.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\compress_filter.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\connected_channel.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\http_client_filter.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\http_server_filter.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\compression\compression.c">
- <Filter>src\core\lib\compression</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\compression\message_compress.c">
- <Filter>src\core\lib\compression</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\debug\trace.c">
- <Filter>src\core\lib\debug</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\format_request.c">
- <Filter>src\core\lib\http</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\httpcli.c">
- <Filter>src\core\lib\http</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\parser.c">
- <Filter>src\core\lib\http</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\closure.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\error.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\exec_ctx.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\executor.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iocp_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\resolve_address_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\resolve_address_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_utils.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_common_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_linux.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_client_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_client_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_server_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_server_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\time_averaged_stats.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\timer.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\timer_heap.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\udp_server.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\unix_sockets_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\unix_sockets_posix_noop.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_eventfd.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_nospecial.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_pipe.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json.c">
- <Filter>src\core\lib\json</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_reader.c">
- <Filter>src\core\lib\json</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_string.c">
- <Filter>src\core\lib\json</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_writer.c">
- <Filter>src\core\lib\json</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\alarm.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\api_trace.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\byte_buffer.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\byte_buffer_reader.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\call.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\call_details.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\call_log_batch.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel_init.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel_ping.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel_stack_type.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\completion_queue.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\event_string.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\lame_client.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\metadata_array.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\server.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\validate_metadata.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\version.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\connectivity_state.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\metadata.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\metadata_batch.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\static_metadata.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\transport.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\transport_op_string.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\httpcli_security_connector.c">
- <Filter>src\core\lib\http</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\context\security_context.c">
- <Filter>src\core\lib\security\context</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\composite\composite_credentials.c">
- <Filter>src\core\lib\security\credentials\composite</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\credentials.c">
- <Filter>src\core\lib\security\credentials</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\credentials_metadata.c">
- <Filter>src\core\lib\security\credentials</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\fake\fake_credentials.c">
- <Filter>src\core\lib\security\credentials\fake</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\credentials_posix.c">
- <Filter>src\core\lib\security\credentials\google_default</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\credentials_windows.c">
- <Filter>src\core\lib\security\credentials\google_default</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\google_default_credentials.c">
- <Filter>src\core\lib\security\credentials\google_default</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\iam\iam_credentials.c">
- <Filter>src\core\lib\security\credentials\iam</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\json_token.c">
- <Filter>src\core\lib\security\credentials\jwt</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_credentials.c">
- <Filter>src\core\lib\security\credentials\jwt</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_verifier.c">
- <Filter>src\core\lib\security\credentials\jwt</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\oauth2\oauth2_credentials.c">
- <Filter>src\core\lib\security\credentials\oauth2</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\plugin\plugin_credentials.c">
- <Filter>src\core\lib\security\credentials\plugin</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\ssl\ssl_credentials.c">
- <Filter>src\core\lib\security\credentials\ssl</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\client_auth_filter.c">
- <Filter>src\core\lib\security\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\handshake.c">
- <Filter>src\core\lib\security\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\secure_endpoint.c">
- <Filter>src\core\lib\security\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\security_connector.c">
- <Filter>src\core\lib\security\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\server_auth_filter.c">
- <Filter>src\core\lib\security\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\tsi_error.c">
- <Filter>src\core\lib\security\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\util\b64.c">
- <Filter>src\core\lib\security\util</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\util\json_util.c">
- <Filter>src\core\lib\security\util</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\init_secure.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\ext\transport\chttp2\alpn\alpn.c">
- <Filter>src\core\ext\transport\chttp2\alpn</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\tsi\fake_transport_security.c">
- <Filter>src\core\lib\tsi</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\tsi\ssl_transport_security.c">
- <Filter>src\core\lib\tsi</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\tsi\transport_security.c">
- <Filter>src\core\lib\tsi</Filter>
- </ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\cpp\codegen\codegen_init.cc">
<Filter>src\cpp\codegen</Filter>
</ClCompile>
@@ -744,30 +396,6 @@
<ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\time.h">
<Filter>include\grpc\impl\codegen</Filter>
</ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\byte_buffer.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\byte_buffer_reader.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\compression.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc_posix.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\status.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc_security.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc_security_constants.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
</ItemGroup>
<ItemGroup>
<ClInclude Include="$(SolutionDir)\..\include\grpc++\impl\codegen\core_codegen.h">
@@ -791,315 +419,6 @@
<ClInclude Include="$(SolutionDir)\..\src\cpp\server\thread_pool_interface.h">
<Filter>src\cpp\server</Filter>
</ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\channel_args.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack_builder.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\compress_filter.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\connected_channel.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\context.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\http_client_filter.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\http_server_filter.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\compression\algorithm_metadata.h">
- <Filter>src\core\lib\compression</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\compression\message_compress.h">
- <Filter>src\core\lib\compression</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\debug\trace.h">
- <Filter>src\core\lib\debug</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\http\format_request.h">
- <Filter>src\core\lib\http</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\http\httpcli.h">
- <Filter>src\core\lib\http</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\http\parser.h">
- <Filter>src\core\lib\http</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\closure.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\error.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\exec_ctx.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\executor.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iocp_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_internal.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\resolve_address.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_utils.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_client.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_server.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\time_averaged_stats.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\timer.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\timer_heap.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\udp_server.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\unix_sockets_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_pipe.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json.h">
- <Filter>src\core\lib\json</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_common.h">
- <Filter>src\core\lib\json</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_reader.h">
- <Filter>src\core\lib\json</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_writer.h">
- <Filter>src\core\lib\json</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\api_trace.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\call.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\call_test_only.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\channel.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\channel_init.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\channel_stack_type.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\completion_queue.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\event_string.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\init.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\lame_client.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\server.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\connectivity_state.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\metadata.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\metadata_batch.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\static_metadata.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\transport.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\transport_impl.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\context\security_context.h">
- <Filter>src\core\lib\security\context</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\composite\composite_credentials.h">
- <Filter>src\core\lib\security\credentials\composite</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\credentials.h">
- <Filter>src\core\lib\security\credentials</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\fake\fake_credentials.h">
- <Filter>src\core\lib\security\credentials\fake</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\google_default_credentials.h">
- <Filter>src\core\lib\security\credentials\google_default</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\iam\iam_credentials.h">
- <Filter>src\core\lib\security\credentials\iam</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\json_token.h">
- <Filter>src\core\lib\security\credentials\jwt</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_credentials.h">
- <Filter>src\core\lib\security\credentials\jwt</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_verifier.h">
- <Filter>src\core\lib\security\credentials\jwt</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\oauth2\oauth2_credentials.h">
- <Filter>src\core\lib\security\credentials\oauth2</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\plugin\plugin_credentials.h">
- <Filter>src\core\lib\security\credentials\plugin</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\ssl\ssl_credentials.h">
- <Filter>src\core\lib\security\credentials\ssl</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\auth_filters.h">
- <Filter>src\core\lib\security\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\handshake.h">
- <Filter>src\core\lib\security\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\secure_endpoint.h">
- <Filter>src\core\lib\security\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\security_connector.h">
- <Filter>src\core\lib\security\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\tsi_error.h">
- <Filter>src\core\lib\security\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\util\b64.h">
- <Filter>src\core\lib\security\util</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\util\json_util.h">
- <Filter>src\core\lib\security\util</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\ext\transport\chttp2\alpn\alpn.h">
- <Filter>src\core\ext\transport\chttp2\alpn</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\fake_transport_security.h">
- <Filter>src\core\lib\tsi</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\ssl_transport_security.h">
- <Filter>src\core\lib\tsi</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\ssl_types.h">
- <Filter>src\core\lib\tsi</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\transport_security.h">
- <Filter>src\core\lib\tsi</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\transport_security_interface.h">
- <Filter>src\core\lib\tsi</Filter>
- </ClInclude>
</ItemGroup>
<ItemGroup>
@@ -1139,90 +458,6 @@
<Filter Include="src">
<UniqueIdentifier>{328ff211-2886-406e-56f9-18ba1686f363}</UniqueIdentifier>
</Filter>
- <Filter Include="src\core">
- <UniqueIdentifier>{d02f1155-7e7e-3736-3c69-dc9146dc523d}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\ext">
- <UniqueIdentifier>{96d09c4a-59f9-3486-6c2f-cbf695b285d8}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\ext\transport">
- <UniqueIdentifier>{202b1172-189f-afc4-f16c-4ca12677b480}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\ext\transport\chttp2">
- <UniqueIdentifier>{9de393b8-4b6e-6c34-122a-940419ca9989}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\ext\transport\chttp2\alpn">
- <UniqueIdentifier>{efb6b3e6-8c7b-c2a0-12c6-486c68cdb8ec}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib">
- <UniqueIdentifier>{80567a8f-622f-a3ce-c12d-aebb63984b07}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\channel">
- <UniqueIdentifier>{e769265c-8abd-cd64-2cc2-a52da484fe7b}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\compression">
- <UniqueIdentifier>{701b2d46-11c6-3640-b189-45287f00bee3}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\debug">
- <UniqueIdentifier>{ada68fd5-8e51-98cb-71a7-baf7989d8ffa}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\http">
- <UniqueIdentifier>{e770844e-61d4-555e-59be-81288e21a35f}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\iomgr">
- <UniqueIdentifier>{04dfa1c8-7ffe-4f06-4a7c-37441dc75764}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\json">
- <UniqueIdentifier>{a5d5bddf-6f19-b655-a03a-f30ff5c253a5}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security">
- <UniqueIdentifier>{dbd8cbb6-6308-d6fe-7a36-06cc7045c037}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\context">
- <UniqueIdentifier>{ecd2c264-808d-0041-2f69-a5200543de91}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials">
- <UniqueIdentifier>{0015e481-7e80-8936-a25c-c3fa260cc095}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\composite">
- <UniqueIdentifier>{fad200df-a5e2-1648-7442-cea0f07edd4d}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\fake">
- <UniqueIdentifier>{397464b3-9bbd-15a5-041b-c7deef1662ec}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\google_default">
- <UniqueIdentifier>{567691b4-6a06-cc5a-c6ad-e8c080b89ecf}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\iam">
- <UniqueIdentifier>{d5930113-d396-7a70-d273-d07a1feae0ff}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\jwt">
- <UniqueIdentifier>{0f6afb67-4b51-6344-9de7-2b1a18a19e7d}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\oauth2">
- <UniqueIdentifier>{99faa051-ca9f-cb4f-36d5-95f042fb22bc}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\plugin">
- <UniqueIdentifier>{b7a9e7e5-2445-6b0f-4677-5095ca10e760}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\ssl">
- <UniqueIdentifier>{436bc65a-0c1b-d85a-2c91-6474588c5cb6}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\transport">
- <UniqueIdentifier>{e6a9bf58-3b0f-0b3d-3a35-3ded80d27695}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\util">
- <UniqueIdentifier>{b4a1cab8-5c2c-909a-8097-7a5c8f0aa9f7}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\surface">
- <UniqueIdentifier>{fb2276d7-5a11-f1d9-82c3-e7c7f1155523}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\transport">
- <UniqueIdentifier>{4bd7971a-68f7-0d5a-f502-6dea3099caaa}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\tsi">
- <UniqueIdentifier>{aa0153b8-c9b6-ae1d-ebdd-89754d8579f1}</UniqueIdentifier>
- </Filter>
<Filter Include="src\cpp">
<UniqueIdentifier>{2420a905-e4f1-a5aa-a364-6a112878a39e}</UniqueIdentifier>
</Filter>
diff --git a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj
index 84e709611d..03be485b29 100644
--- a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj
+++ b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj
@@ -356,122 +356,11 @@
<ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\sync_posix.h" />
<ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\sync_windows.h" />
<ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\time.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\byte_buffer.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\byte_buffer_reader.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\compression.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\status.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc_security.h" />
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc_security_constants.h" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="$(SolutionDir)\..\src\cpp\client\create_channel_internal.h" />
<ClInclude Include="$(SolutionDir)\..\src\cpp\server\dynamic_thread_pool.h" />
<ClInclude Include="$(SolutionDir)\..\src\cpp\server\thread_pool_interface.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\channel_args.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack_builder.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\compress_filter.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\connected_channel.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\context.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\http_client_filter.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\http_server_filter.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\compression\algorithm_metadata.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\compression\message_compress.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\debug\trace.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\http\format_request.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\http\httpcli.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\http\parser.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\closure.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\error.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\exec_ctx.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\executor.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iocp_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_internal.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\resolve_address.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_utils.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_client.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_server.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\time_averaged_stats.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\timer.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\timer_heap.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\udp_server.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\unix_sockets_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_pipe.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_posix.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_common.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_reader.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_writer.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\api_trace.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\call.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\call_test_only.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\channel.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\channel_init.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\channel_stack_type.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\completion_queue.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\event_string.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\init.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\lame_client.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\server.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\connectivity_state.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\metadata.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\metadata_batch.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\static_metadata.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\transport.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\transport_impl.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\context\security_context.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\composite\composite_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\fake\fake_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\google_default_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\iam\iam_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\json_token.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_verifier.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\oauth2\oauth2_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\plugin\plugin_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\ssl\ssl_credentials.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\auth_filters.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\handshake.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\secure_endpoint.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\security_connector.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\tsi_error.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\util\b64.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\util\json_util.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\ext\transport\chttp2\alpn\alpn.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\fake_transport_security.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\ssl_transport_security.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\ssl_types.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\transport_security.h" />
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\transport_security_interface.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="$(SolutionDir)\..\src\cpp\common\insecure_create_auth_context.cc">
@@ -528,238 +417,6 @@
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\cpp\util\time.cc">
</ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\channel_args.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack_builder.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\compress_filter.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\connected_channel.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\http_client_filter.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\http_server_filter.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\compression\compression.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\compression\message_compress.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\debug\trace.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\format_request.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\httpcli.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\parser.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\closure.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\error.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\exec_ctx.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\executor.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iocp_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\resolve_address_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\resolve_address_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_utils.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_common_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_linux.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_client_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_client_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_server_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_server_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\time_averaged_stats.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\timer.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\timer_heap.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\udp_server.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\unix_sockets_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\unix_sockets_posix_noop.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_eventfd.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_nospecial.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_pipe.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_reader.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_string.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_writer.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\alarm.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\api_trace.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\byte_buffer.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\byte_buffer_reader.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\call.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\call_details.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\call_log_batch.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel_init.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel_ping.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel_stack_type.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\completion_queue.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\event_string.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\lame_client.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\metadata_array.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\server.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\validate_metadata.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\version.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\connectivity_state.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\metadata.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\metadata_batch.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\static_metadata.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\transport.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\transport_op_string.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\httpcli_security_connector.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\context\security_context.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\composite\composite_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\credentials_metadata.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\fake\fake_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\credentials_posix.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\credentials_windows.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\google_default_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\iam\iam_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\json_token.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_verifier.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\oauth2\oauth2_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\plugin\plugin_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\ssl\ssl_credentials.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\client_auth_filter.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\handshake.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\secure_endpoint.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\security_connector.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\server_auth_filter.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\tsi_error.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\util\b64.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\util\json_util.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\init_secure.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\ext\transport\chttp2\alpn\alpn.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\tsi\fake_transport_security.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\tsi\ssl_transport_security.c">
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\tsi\transport_security.c">
- </ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\cpp\codegen\codegen_init.cc">
</ClCompile>
</ItemGroup>
@@ -770,6 +427,9 @@
<ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc_unsecure\grpc_unsecure.vcxproj">
<Project>{46CEDFFF-9692-456A-AA24-38B5D6BCF4C5}</Project>
</ProjectReference>
+ <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc\grpc.vcxproj">
+ <Project>{29D16885-7228-4C31-81ED-5F9187C7F2A9}</Project>
+ </ProjectReference>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
diff --git a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters
index 1e54e1595d..ba99bc53c8 100644
--- a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters
+++ b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters
@@ -82,354 +82,6 @@
<ClCompile Include="$(SolutionDir)\..\src\cpp\util\time.cc">
<Filter>src\cpp\util</Filter>
</ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\channel_args.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack_builder.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\compress_filter.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\connected_channel.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\http_client_filter.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\channel\http_server_filter.c">
- <Filter>src\core\lib\channel</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\compression\compression.c">
- <Filter>src\core\lib\compression</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\compression\message_compress.c">
- <Filter>src\core\lib\compression</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\debug\trace.c">
- <Filter>src\core\lib\debug</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\format_request.c">
- <Filter>src\core\lib\http</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\httpcli.c">
- <Filter>src\core\lib\http</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\parser.c">
- <Filter>src\core\lib\http</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\closure.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\error.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\exec_ctx.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\executor.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iocp_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\resolve_address_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\resolve_address_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_utils.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_common_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_linux.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_client_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_client_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_server_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_server_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\time_averaged_stats.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\timer.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\timer_heap.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\udp_server.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\unix_sockets_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\unix_sockets_posix_noop.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_eventfd.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_nospecial.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_pipe.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_posix.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.c">
- <Filter>src\core\lib\iomgr</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json.c">
- <Filter>src\core\lib\json</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_reader.c">
- <Filter>src\core\lib\json</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_string.c">
- <Filter>src\core\lib\json</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\json\json_writer.c">
- <Filter>src\core\lib\json</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\alarm.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\api_trace.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\byte_buffer.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\byte_buffer_reader.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\call.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\call_details.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\call_log_batch.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel_init.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel_ping.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\channel_stack_type.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\completion_queue.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\event_string.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\lame_client.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\metadata_array.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\server.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\validate_metadata.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\version.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\connectivity_state.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\metadata.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\metadata_batch.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\static_metadata.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\transport.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\transport\transport_op_string.c">
- <Filter>src\core\lib\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\http\httpcli_security_connector.c">
- <Filter>src\core\lib\http</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\context\security_context.c">
- <Filter>src\core\lib\security\context</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\composite\composite_credentials.c">
- <Filter>src\core\lib\security\credentials\composite</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\credentials.c">
- <Filter>src\core\lib\security\credentials</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\credentials_metadata.c">
- <Filter>src\core\lib\security\credentials</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\fake\fake_credentials.c">
- <Filter>src\core\lib\security\credentials\fake</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\credentials_posix.c">
- <Filter>src\core\lib\security\credentials\google_default</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\credentials_windows.c">
- <Filter>src\core\lib\security\credentials\google_default</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\google_default_credentials.c">
- <Filter>src\core\lib\security\credentials\google_default</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\iam\iam_credentials.c">
- <Filter>src\core\lib\security\credentials\iam</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\json_token.c">
- <Filter>src\core\lib\security\credentials\jwt</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_credentials.c">
- <Filter>src\core\lib\security\credentials\jwt</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_verifier.c">
- <Filter>src\core\lib\security\credentials\jwt</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\oauth2\oauth2_credentials.c">
- <Filter>src\core\lib\security\credentials\oauth2</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\plugin\plugin_credentials.c">
- <Filter>src\core\lib\security\credentials\plugin</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\credentials\ssl\ssl_credentials.c">
- <Filter>src\core\lib\security\credentials\ssl</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\client_auth_filter.c">
- <Filter>src\core\lib\security\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\handshake.c">
- <Filter>src\core\lib\security\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\secure_endpoint.c">
- <Filter>src\core\lib\security\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\security_connector.c">
- <Filter>src\core\lib\security\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\server_auth_filter.c">
- <Filter>src\core\lib\security\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\transport\tsi_error.c">
- <Filter>src\core\lib\security\transport</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\util\b64.c">
- <Filter>src\core\lib\security\util</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\security\util\json_util.c">
- <Filter>src\core\lib\security\util</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\surface\init_secure.c">
- <Filter>src\core\lib\surface</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\ext\transport\chttp2\alpn\alpn.c">
- <Filter>src\core\ext\transport\chttp2\alpn</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\tsi\fake_transport_security.c">
- <Filter>src\core\lib\tsi</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\tsi\ssl_transport_security.c">
- <Filter>src\core\lib\tsi</Filter>
- </ClCompile>
- <ClCompile Include="$(SolutionDir)\..\src\core\lib\tsi\transport_security.c">
- <Filter>src\core\lib\tsi</Filter>
- </ClCompile>
<ClCompile Include="$(SolutionDir)\..\src\cpp\codegen\codegen_init.cc">
<Filter>src\cpp\codegen</Filter>
</ClCompile>
@@ -729,30 +381,6 @@
<ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\time.h">
<Filter>include\grpc\impl\codegen</Filter>
</ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\byte_buffer.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\byte_buffer_reader.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\compression.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc_posix.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\status.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc_security.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc_security_constants.h">
- <Filter>include\grpc</Filter>
- </ClInclude>
</ItemGroup>
<ItemGroup>
<ClInclude Include="$(SolutionDir)\..\src\cpp\client\create_channel_internal.h">
@@ -764,315 +392,6 @@
<ClInclude Include="$(SolutionDir)\..\src\cpp\server\thread_pool_interface.h">
<Filter>src\cpp\server</Filter>
</ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\channel_args.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\channel_stack_builder.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\compress_filter.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\connected_channel.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\context.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\http_client_filter.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\channel\http_server_filter.h">
- <Filter>src\core\lib\channel</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\compression\algorithm_metadata.h">
- <Filter>src\core\lib\compression</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\compression\message_compress.h">
- <Filter>src\core\lib\compression</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\debug\trace.h">
- <Filter>src\core\lib\debug</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\http\format_request.h">
- <Filter>src\core\lib\http</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\http\httpcli.h">
- <Filter>src\core\lib\http</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\http\parser.h">
- <Filter>src\core\lib\http</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\closure.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\error.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\exec_ctx.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\executor.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iocp_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_internal.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\resolve_address.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_utils.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\sockaddr_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_utils_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\socket_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_client.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_server.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\tcp_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\time_averaged_stats.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\timer.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\timer_heap.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\udp_server.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\unix_sockets_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_pipe.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\wakeup_fd_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_posix.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\workqueue_windows.h">
- <Filter>src\core\lib\iomgr</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json.h">
- <Filter>src\core\lib\json</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_common.h">
- <Filter>src\core\lib\json</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_reader.h">
- <Filter>src\core\lib\json</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\json\json_writer.h">
- <Filter>src\core\lib\json</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\api_trace.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\call.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\call_test_only.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\channel.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\channel_init.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\channel_stack_type.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\completion_queue.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\event_string.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\init.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\lame_client.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\server.h">
- <Filter>src\core\lib\surface</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\connectivity_state.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\metadata.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\metadata_batch.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\static_metadata.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\transport.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\transport_impl.h">
- <Filter>src\core\lib\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\context\security_context.h">
- <Filter>src\core\lib\security\context</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\composite\composite_credentials.h">
- <Filter>src\core\lib\security\credentials\composite</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\credentials.h">
- <Filter>src\core\lib\security\credentials</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\fake\fake_credentials.h">
- <Filter>src\core\lib\security\credentials\fake</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\google_default\google_default_credentials.h">
- <Filter>src\core\lib\security\credentials\google_default</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\iam\iam_credentials.h">
- <Filter>src\core\lib\security\credentials\iam</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\json_token.h">
- <Filter>src\core\lib\security\credentials\jwt</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_credentials.h">
- <Filter>src\core\lib\security\credentials\jwt</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\jwt\jwt_verifier.h">
- <Filter>src\core\lib\security\credentials\jwt</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\oauth2\oauth2_credentials.h">
- <Filter>src\core\lib\security\credentials\oauth2</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\plugin\plugin_credentials.h">
- <Filter>src\core\lib\security\credentials\plugin</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\credentials\ssl\ssl_credentials.h">
- <Filter>src\core\lib\security\credentials\ssl</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\auth_filters.h">
- <Filter>src\core\lib\security\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\handshake.h">
- <Filter>src\core\lib\security\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\secure_endpoint.h">
- <Filter>src\core\lib\security\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\security_connector.h">
- <Filter>src\core\lib\security\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\transport\tsi_error.h">
- <Filter>src\core\lib\security\transport</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\util\b64.h">
- <Filter>src\core\lib\security\util</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\security\util\json_util.h">
- <Filter>src\core\lib\security\util</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\ext\transport\chttp2\alpn\alpn.h">
- <Filter>src\core\ext\transport\chttp2\alpn</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\fake_transport_security.h">
- <Filter>src\core\lib\tsi</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\ssl_transport_security.h">
- <Filter>src\core\lib\tsi</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\ssl_types.h">
- <Filter>src\core\lib\tsi</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\transport_security.h">
- <Filter>src\core\lib\tsi</Filter>
- </ClInclude>
- <ClInclude Include="$(SolutionDir)\..\src\core\lib\tsi\transport_security_interface.h">
- <Filter>src\core\lib\tsi</Filter>
- </ClInclude>
</ItemGroup>
<ItemGroup>
@@ -1112,90 +431,6 @@
<Filter Include="src">
<UniqueIdentifier>{cce6a85d-1111-3834-6825-31e170d93cff}</UniqueIdentifier>
</Filter>
- <Filter Include="src\core">
- <UniqueIdentifier>{595f2ea0-aafb-87e5-c938-db3ff0b0c69a}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\ext">
- <UniqueIdentifier>{52eca76b-9502-3d96-9064-6415226a860f}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\ext\transport">
- <UniqueIdentifier>{8e70201f-3b54-d3cb-8b30-ebe0d96a9b2a}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\ext\transport\chttp2">
- <UniqueIdentifier>{d505ab7b-5e44-f307-5361-500128965cdc}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\ext\transport\chttp2\alpn">
- <UniqueIdentifier>{d54bab94-cab9-803d-2737-5120774f1893}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib">
- <UniqueIdentifier>{cf8fd5d8-ff54-331d-2d20-36d6cae0e14b}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\channel">
- <UniqueIdentifier>{7e0225af-000b-4873-1c16-caffffbfd084}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\compression">
- <UniqueIdentifier>{0bbdbf56-83ad-bb4b-c4e2-a6d38c342179}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\debug">
- <UniqueIdentifier>{3875f7d7-ff11-c91d-0f98-810260cb554b}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\http">
- <UniqueIdentifier>{4bd405b9-af65-f0a6-d67a-433f75900668}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\iomgr">
- <UniqueIdentifier>{f4b146e4-8fba-83a6-1cc1-1262ebb785e8}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\json">
- <UniqueIdentifier>{b83c8e70-e491-f6f9-a08c-85f632bb61d2}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security">
- <UniqueIdentifier>{7e21ce26-45e2-6baf-037d-8ab4374077a9}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\context">
- <UniqueIdentifier>{613e655a-e5c0-9f0c-2bb4-62310a7329c0}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials">
- <UniqueIdentifier>{30bddf3f-0eda-9f2f-8171-d86b1e4896fc}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\composite">
- <UniqueIdentifier>{b34f8fa3-0fb9-4916-be6d-2a14a0794882}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\fake">
- <UniqueIdentifier>{7e11872b-bfbb-7d23-4783-e56909c520e8}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\google_default">
- <UniqueIdentifier>{212855e8-b7bc-d5bb-0734-dd28996f28de}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\iam">
- <UniqueIdentifier>{6d3828d0-5e5f-15c2-7d46-5d4039a88aad}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\jwt">
- <UniqueIdentifier>{b31e7015-364c-5701-31d0-644b1a8ae8c9}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\oauth2">
- <UniqueIdentifier>{43e3cb91-4101-1fee-6833-20f77ab7f4e5}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\plugin">
- <UniqueIdentifier>{727c0b51-4544-957f-45f2-00bf42ff7db9}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\credentials\ssl">
- <UniqueIdentifier>{606a441b-0d57-85d8-8079-1e6e502d18f1}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\transport">
- <UniqueIdentifier>{5b0b16ae-a8ad-81c3-afe4-8ac0b9e15311}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\security\util">
- <UniqueIdentifier>{56333427-0f81-b88b-bf49-a1b2f462023d}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\surface">
- <UniqueIdentifier>{1d59dcef-3358-d0ab-fa42-64da74065785}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\transport">
- <UniqueIdentifier>{ba865739-5dd9-6731-6772-48c25d45134f}</UniqueIdentifier>
- </Filter>
- <Filter Include="src\core\lib\tsi">
- <UniqueIdentifier>{dd4e4960-5bc8-395b-09c4-f2cbd6f6432b}</UniqueIdentifier>
- </Filter>
<Filter Include="src\cpp">
<UniqueIdentifier>{1e5fd68c-bd87-e803-42b0-75a7fa19b91d}</UniqueIdentifier>
</Filter>