Diffstat (limited to 'src')
-rw-r--r--src/compiler/cpp_generator.cc15
-rw-r--r--src/core/ext/census/gen/census.pb.h2
-rw-r--r--src/core/ext/census/gen/trace_context.pb.h2
-rw-r--r--src/core/ext/census/grpc_filter.c4
-rw-r--r--src/core/ext/census/tracing.c10
-rw-r--r--src/core/ext/client_channel/client_channel.c29
-rw-r--r--src/core/ext/client_channel/connector.h3
-rw-r--r--src/core/ext/client_channel/resolver_registry.h4
-rw-r--r--src/core/ext/client_channel/subchannel.c67
-rw-r--r--src/core/ext/client_channel/subchannel.h15
-rw-r--r--src/core/ext/client_channel/subchannel_index.c12
-rw-r--r--src/core/ext/lb_policy/grpclb/grpclb.c66
-rw-r--r--src/core/ext/lb_policy/pick_first/pick_first.c12
-rw-r--r--src/core/ext/lb_policy/round_robin/round_robin.c12
-rw-r--r--src/core/ext/load_reporting/load_reporting_filter.c84
-rw-r--r--src/core/ext/resolver/dns/native/dns_resolver.c5
-rw-r--r--src/core/ext/resolver/sockaddr/sockaddr_resolver.c2
-rw-r--r--src/core/ext/transport/chttp2/client/chttp2_connector.c7
-rw-r--r--src/core/ext/transport/chttp2/server/chttp2_server.c4
-rw-r--r--src/core/ext/transport/chttp2/server/insecure/server_chttp2.c2
-rw-r--r--src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c2
-rw-r--r--src/core/ext/transport/chttp2/transport/bin_decoder.c14
-rw-r--r--src/core/ext/transport/chttp2/transport/bin_encoder.c3
-rw-r--r--src/core/ext/transport/chttp2/transport/bin_encoder.h3
-rw-r--r--src/core/ext/transport/chttp2/transport/chttp2_plugin.c3
-rw-r--r--src/core/ext/transport/chttp2/transport/chttp2_transport.c374
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_rst_stream.c13
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_settings.c16
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_encoder.c152
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_encoder.h4
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_parser.c196
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_parser.h20
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_table.c41
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_table.h12
-rw-r--r--src/core/ext/transport/chttp2/transport/incoming_metadata.c34
-rw-r--r--src/core/ext/transport/chttp2/transport/incoming_metadata.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/internal.h3
-rw-r--r--src/core/ext/transport/chttp2/transport/parsing.c69
-rw-r--r--src/core/ext/transport/chttp2/transport/writing.c6
-rw-r--r--src/core/ext/transport/cronet/client/secure/cronet_channel_create.c2
-rw-r--r--src/core/ext/transport/cronet/transport/cronet_api_dummy.c30
-rw-r--r--src/core/ext/transport/cronet/transport/cronet_transport.c189
-rw-r--r--src/core/lib/channel/channel_stack.c43
-rw-r--r--src/core/lib/channel/channel_stack.h19
-rw-r--r--src/core/lib/channel/compress_filter.c94
-rw-r--r--src/core/lib/channel/deadline_filter.c15
-rw-r--r--src/core/lib/channel/http_client_filter.c246
-rw-r--r--src/core/lib/channel/http_server_filter.c296
-rw-r--r--src/core/lib/channel/message_size_filter.c14
-rw-r--r--src/core/lib/compression/algorithm_metadata.h8
-rw-r--r--src/core/lib/compression/compression.c40
-rw-r--r--src/core/lib/http/httpcli_security_connector.c2
-rw-r--r--src/core/lib/iomgr/closure.c1
-rw-r--r--src/core/lib/iomgr/closure.h1
-rw-r--r--src/core/lib/iomgr/combiner.c12
-rw-r--r--src/core/lib/iomgr/error.c154
-rw-r--r--src/core/lib/iomgr/error.h13
-rw-r--r--src/core/lib/iomgr/error_internal.h54
-rw-r--r--src/core/lib/iomgr/ev_epoll_linux.c2
-rw-r--r--src/core/lib/iomgr/ev_posix.c2
-rw-r--r--src/core/lib/iomgr/exec_ctx.c17
-rw-r--r--src/core/lib/iomgr/exec_ctx.h25
-rw-r--r--src/core/lib/iomgr/executor.c4
-rw-r--r--src/core/lib/iomgr/load_file.c2
-rw-r--r--src/core/lib/iomgr/resource_quota.c10
-rw-r--r--src/core/lib/iomgr/resource_quota.h8
-rw-r--r--src/core/lib/iomgr/tcp_client_posix.c2
-rw-r--r--src/core/lib/iomgr/tcp_posix.c4
-rw-r--r--src/core/lib/iomgr/tcp_server_windows.c2
-rw-r--r--src/core/lib/iomgr/tcp_uv.c3
-rw-r--r--src/core/lib/security/credentials/google_default/google_default_credentials.c2
-rw-r--r--src/core/lib/security/credentials/plugin/plugin_credentials.c22
-rw-r--r--src/core/lib/security/transport/client_auth_filter.c115
-rw-r--r--src/core/lib/security/transport/security_connector.c2
-rw-r--r--src/core/lib/security/transport/security_handshaker.c2
-rw-r--r--src/core/lib/security/transport/server_auth_filter.c69
-rw-r--r--src/core/lib/security/util/b64.c2
-rw-r--r--src/core/lib/slice/slice.c130
-rw-r--r--src/core/lib/slice/slice_hash_table.c (renamed from src/core/lib/transport/mdstr_hash_table.c)69
-rw-r--r--src/core/lib/slice/slice_hash_table.h (renamed from src/core/lib/transport/mdstr_hash_table.h)42
-rw-r--r--src/core/lib/slice/slice_intern.c344
-rw-r--r--src/core/lib/slice/slice_internal.h15
-rw-r--r--src/core/lib/slice/slice_string_helpers.c5
-rw-r--r--src/core/lib/slice/slice_string_helpers.h5
-rw-r--r--src/core/lib/slice/slice_traits.h44
-rw-r--r--src/core/lib/surface/call.c988
-rw-r--r--src/core/lib/surface/call.h2
-rw-r--r--src/core/lib/surface/call_details.c10
-rw-r--r--src/core/lib/surface/call_log_batch.c23
-rw-r--r--src/core/lib/surface/channel.c106
-rw-r--r--src/core/lib/surface/channel.h11
-rw-r--r--src/core/lib/surface/completion_queue.c15
-rw-r--r--src/core/lib/surface/init.c3
-rw-r--r--src/core/lib/surface/lame_client.c16
-rw-r--r--src/core/lib/surface/server.c142
-rw-r--r--src/core/lib/surface/validate_metadata.c61
-rw-r--r--src/core/lib/surface/validate_metadata.h43
-rw-r--r--src/core/lib/transport/connectivity_state.c1
-rw-r--r--src/core/lib/transport/error_utils.c124
-rw-r--r--src/core/lib/transport/error_utils.h56
-rw-r--r--src/core/lib/transport/http2_errors.h (renamed from src/core/ext/transport/chttp2/transport/http2_errors.h)36
-rw-r--r--src/core/lib/transport/metadata.c760
-rw-r--r--src/core/lib/transport/metadata.h137
-rw-r--r--src/core/lib/transport/metadata_batch.c242
-rw-r--r--src/core/lib/transport/metadata_batch.h76
-rw-r--r--src/core/lib/transport/method_config.c347
-rw-r--r--src/core/lib/transport/method_config.h139
-rw-r--r--src/core/lib/transport/service_config.c39
-rw-r--r--src/core/lib/transport/service_config.h10
-rw-r--r--src/core/lib/transport/static_metadata.c870
-rw-r--r--src/core/lib/transport/static_metadata.h672
-rw-r--r--src/core/lib/transport/status_conversion.c (renamed from src/core/ext/transport/chttp2/transport/status_conversion.c)38
-rw-r--r--src/core/lib/transport/status_conversion.h (renamed from src/core/ext/transport/chttp2/transport/status_conversion.h)19
-rw-r--r--src/core/lib/transport/timeout_encoding.c20
-rw-r--r--src/core/lib/transport/timeout_encoding.h4
-rw-r--r--src/core/lib/transport/transport.c99
-rw-r--r--src/core/lib/transport/transport.h34
-rw-r--r--src/core/lib/transport/transport_op_string.c26
-rw-r--r--src/cpp/client/channel_cc.cc18
-rw-r--r--src/cpp/client/secure_credentials.cc9
-rw-r--r--src/cpp/client/secure_credentials.h2
-rw-r--r--src/cpp/common/channel_arguments.cc8
-rw-r--r--src/cpp/common/channel_filter.cc8
-rw-r--r--src/cpp/common/channel_filter.h6
-rw-r--r--src/cpp/common/core_codegen.cc17
-rw-r--r--src/cpp/server/dynamic_thread_pool.cc5
-rw-r--r--src/cpp/server/secure_server_credentials.cc19
-rw-r--r--src/cpp/server/server_cc.cc26
-rw-r--r--src/cpp/server/server_context.cc7
-rw-r--r--src/cpp/test/server_context_test_spouse.cc7
-rw-r--r--src/cpp/util/slice_cc.cc2
-rw-r--r--src/csharp/Grpc.Core/Internal/BatchContextSafeHandle.cs4
-rw-r--r--src/csharp/Grpc.Core/Internal/MetadataArraySafeHandle.cs10
-rw-r--r--src/csharp/Grpc.Core/Internal/NativeMethods.cs13
-rw-r--r--src/csharp/Grpc.Core/Internal/RequestCallContextSafeHandle.cs10
-rw-r--r--src/csharp/ext/grpc_csharp_ext.c73
-rw-r--r--src/node/ext/byte_buffer.cc6
-rw-r--r--src/node/ext/call.cc130
-rw-r--r--src/node/ext/call.h17
-rw-r--r--src/node/ext/call_credentials.cc3
-rw-r--r--src/node/ext/channel.cc3
-rw-r--r--src/node/ext/node_grpc.cc16
-rw-r--r--src/node/ext/server.cc19
-rw-r--r--src/node/ext/slice.cc102
-rw-r--r--src/node/ext/slice.h52
-rw-r--r--src/objective-c/CronetFramework.podspec5
-rw-r--r--src/objective-c/GRPCClient/GRPCCall+Cronet.h6
-rw-r--r--src/objective-c/GRPCClient/GRPCCall+Cronet.m6
-rw-r--r--src/objective-c/GRPCClient/private/GRPCChannel.m6
-rw-r--r--src/objective-c/GRPCClient/private/GRPCWrappedCall.m19
-rw-r--r--src/objective-c/GRPCClient/private/NSDictionary+GRPC.m20
-rw-r--r--src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m4
-rw-r--r--src/php/README.md7
-rw-r--r--src/php/ext/grpc/call.c47
-rw-r--r--src/php/ext/grpc/server.c10
-rw-r--r--src/proto/grpc/testing/BUILD30
-rw-r--r--src/proto/grpc/testing/duplicate/BUILD30
-rw-r--r--src/python/grpcio/_spawn_patch.py46
-rw-r--r--src/python/grpcio/commands.py352
-rw-r--r--src/python/grpcio/grpc/__init__.py767
-rw-r--r--src/python/grpcio/grpc/_auth.py83
-rw-r--r--src/python/grpcio/grpc/_channel.py1529
-rw-r--r--src/python/grpcio/grpc/_common.py123
-rw-r--r--src/python/grpcio/grpc/_credential_composition.py18
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi15
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi14
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi35
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi12
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi129
-rw-r--r--src/python/grpcio/grpc/_plugin_wrapping.py123
-rw-r--r--src/python/grpcio/grpc/_server.py1094
-rw-r--r--src/python/grpcio/grpc/_utilities.py235
-rw-r--r--src/python/grpcio/grpc/beta/_client_adaptations.py1028
-rw-r--r--src/python/grpcio/grpc/beta/_connectivity_channel.py213
-rw-r--r--src/python/grpcio/grpc/beta/_server_adaptations.py544
-rw-r--r--src/python/grpcio/grpc/beta/implementations.py173
-rw-r--r--src/python/grpcio/grpc/beta/interfaces.py78
-rw-r--r--src/python/grpcio/grpc/beta/utilities.py202
-rw-r--r--src/python/grpcio/grpc/framework/__init__.py2
-rw-r--r--src/python/grpcio/grpc/framework/common/__init__.py2
-rw-r--r--src/python/grpcio/grpc/framework/common/cardinality.py11
-rw-r--r--src/python/grpcio/grpc/framework/common/style.py7
-rw-r--r--src/python/grpcio/grpc/framework/foundation/__init__.py2
-rw-r--r--src/python/grpcio/grpc/framework/foundation/abandonment.py3
-rw-r--r--src/python/grpcio/grpc/framework/foundation/callable_util.py47
-rw-r--r--src/python/grpcio/grpc/framework/foundation/future.py129
-rw-r--r--src/python/grpcio/grpc/framework/foundation/logging_pool.py55
-rw-r--r--src/python/grpcio/grpc/framework/foundation/stream.py28
-rw-r--r--src/python/grpcio/grpc/framework/foundation/stream_util.py227
-rw-r--r--src/python/grpcio/grpc/framework/interfaces/__init__.py2
-rw-r--r--src/python/grpcio/grpc/framework/interfaces/base/__init__.py2
-rw-r--r--src/python/grpcio/grpc/framework/interfaces/base/base.py184
-rw-r--r--src/python/grpcio/grpc/framework/interfaces/base/utilities.py42
-rw-r--r--src/python/grpcio/grpc/framework/interfaces/face/__init__.py2
-rw-r--r--src/python/grpcio/grpc/framework/interfaces/face/face.py610
-rw-r--r--src/python/grpcio/grpc/framework/interfaces/face/utilities.py87
-rw-r--r--src/python/grpcio/grpc_core_dependencies.py6
-rw-r--r--src/python/grpcio/support.py103
-rw-r--r--src/python/grpcio_health_checking/grpc_health/__init__.py2
-rw-r--r--src/python/grpcio_health_checking/grpc_health/v1/__init__.py2
-rw-r--r--src/python/grpcio_health_checking/grpc_health/v1/health.py33
-rw-r--r--src/python/grpcio_health_checking/health_commands.py53
-rw-r--r--src/python/grpcio_health_checking/setup.py14
-rw-r--r--src/python/grpcio_reflection/grpc_reflection/__init__.py1
-rw-r--r--src/python/grpcio_reflection/grpc_reflection/v1alpha/__init__.py1
-rw-r--r--src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py168
-rw-r--r--src/python/grpcio_reflection/reflection_commands.py57
-rw-r--r--src/python/grpcio_reflection/setup.py14
-rw-r--r--src/python/grpcio_tests/commands.py266
-rw-r--r--src/python/grpcio_tests/setup.py42
-rw-r--r--src/python/grpcio_tests/tests/_loader.py89
-rw-r--r--src/python/grpcio_tests/tests/_result.py618
-rw-r--r--src/python/grpcio_tests/tests/_runner.py273
-rw-r--r--src/python/grpcio_tests/tests/health_check/_health_servicer_test.py87
-rw-r--r--src/python/grpcio_tests/tests/http2/_negative_http2_client.py153
-rw-r--r--src/python/grpcio_tests/tests/http2/negative_http2_client.py175
-rw-r--r--src/python/grpcio_tests/tests/interop/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py24
-rw-r--r--src/python/grpcio_tests/tests/interop/_intraop_test_case.py38
-rw-r--r--src/python/grpcio_tests/tests/interop/_secure_intraop_test.py35
-rw-r--r--src/python/grpcio_tests/tests/interop/client.py163
-rw-r--r--src/python/grpcio_tests/tests/interop/methods.py759
-rw-r--r--src/python/grpcio_tests/tests/interop/resources.py21
-rw-r--r--src/python/grpcio_tests/tests/interop/server.py57
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py754
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py446
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py716
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/protos/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/protos/payload/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/protos/requests/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/protos/responses/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/protos/service/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/qps/benchmark_client.py280
-rw-r--r--src/python/grpcio_tests/tests/qps/benchmark_server.py32
-rw-r--r--src/python/grpcio_tests/tests/qps/client_runner.py107
-rw-r--r--src/python/grpcio_tests/tests/qps/histogram.py82
-rw-r--r--src/python/grpcio_tests/tests/qps/qps_worker.py34
-rw-r--r--src/python/grpcio_tests/tests/qps/worker_server.py297
-rw-r--r--src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py234
-rw-r--r--src/python/grpcio_tests/tests/stress/client.py217
-rw-r--r--src/python/grpcio_tests/tests/stress/metrics_server.py41
-rw-r--r--src/python/grpcio_tests/tests/stress/test_runner.py59
-rw-r--r--src/python/grpcio_tests/tests/unit/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_api_test.py117
-rw-r--r--src/python/grpcio_tests/tests/unit/_auth_test.py69
-rw-r--r--src/python/grpcio_tests/tests/unit/_channel_args_test.py20
-rw-r--r--src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py209
-rw-r--r--src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py108
-rw-r--r--src/python/grpcio_tests/tests/unit/_compression_test.py147
-rw-r--r--src/python/grpcio_tests/tests/unit/_credentials_test.py56
-rw-r--r--src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py302
-rw-r--r--src/python/grpcio_tests/tests/unit/_cython/_channel_test.py55
-rw-r--r--src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py402
-rw-r--r--src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py710
-rw-r--r--src/python/grpcio_tests/tests/unit/_cython/test_utilities.py45
-rw-r--r--src/python/grpcio_tests/tests/unit/_empty_message_test.py125
-rw-r--r--src/python/grpcio_tests/tests/unit/_exit_scenarios.py269
-rw-r--r--src/python/grpcio_tests/tests/unit/_exit_test.py243
-rw-r--r--src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py223
-rw-r--r--src/python/grpcio_tests/tests/unit/_invocation_defects_test.py316
-rw-r--r--src/python/grpcio_tests/tests/unit/_junkdrawer/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_junkdrawer/stock_pb2.py209
-rw-r--r--src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py901
-rw-r--r--src/python/grpcio_tests/tests/unit/_metadata_test.py254
-rw-r--r--src/python/grpcio_tests/tests/unit/_rpc_test.py1414
-rw-r--r--src/python/grpcio_tests/tests/unit/_sanity/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_sanity/_sanity_test.py32
-rw-r--r--src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py143
-rw-r--r--src/python/grpcio_tests/tests/unit/_thread_pool.py25
-rw-r--r--src/python/grpcio_tests/tests/unit/beta/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py528
-rw-r--r--src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py15
-rw-r--r--src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py160
-rw-r--r--src/python/grpcio_tests/tests/unit/beta/_implementations_test.py44
-rw-r--r--src/python/grpcio_tests/tests/unit/beta/_not_found_test.py58
-rw-r--r--src/python/grpcio_tests/tests/unit/beta/_utilities_test.py113
-rw-r--r--src/python/grpcio_tests/tests/unit/beta/test_utilities.py17
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/common/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/common/test_constants.py1
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/common/test_control.py97
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/common/test_coverage.py117
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/foundation/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/foundation/_logging_pool_test.py64
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/foundation/stream_testing.py55
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/interfaces/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/interfaces/face/_3069_test_constant.py1
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/interfaces/face/__init__.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py465
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py550
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py876
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py187
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py159
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py519
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py27
-rw-r--r--src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_interfaces.py144
-rw-r--r--src/python/grpcio_tests/tests/unit/resources.py11
-rw-r--r--src/python/grpcio_tests/tests/unit/test_common.py69
-rw-r--r--src/ruby/ext/grpc/rb_byte_buffer.c7
-rw-r--r--src/ruby/ext/grpc/rb_byte_buffer.h3
-rw-r--r--src/ruby/ext/grpc/rb_call.c93
-rw-r--r--src/ruby/ext/grpc/rb_channel.c27
-rw-r--r--src/ruby/ext/grpc/rb_compression_options.c15
-rw-r--r--src/ruby/ext/grpc/rb_grpc_imports.generated.c30
-rw-r--r--src/ruby/ext/grpc/rb_grpc_imports.generated.h55
-rw-r--r--src/ruby/ext/grpc/rb_server.c10
311 files changed, 18575 insertions, 16647 deletions
diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc
index a26eeb46b9..e481aaf811 100644
--- a/src/compiler/cpp_generator.cc
+++ b/src/compiler/cpp_generator.cc
@@ -1218,13 +1218,15 @@ void PrintSourceService(Printer *printer, const Service *service,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["Service"] = service->name();
- printer->Print(*vars,
- "static const char* $prefix$$Service$_method_names[] = {\n");
- for (int i = 0; i < service->method_count(); ++i) {
- (*vars)["Method"] = service->method(i).get()->name();
- printer->Print(*vars, " \"/$Package$$Service$/$Method$\",\n");
+ if (service->method_count() > 0) {
+ printer->Print(*vars,
+ "static const char* $prefix$$Service$_method_names[] = {\n");
+ for (int i = 0; i < service->method_count(); ++i) {
+ (*vars)["Method"] = service->method(i).get()->name();
+ printer->Print(*vars, " \"/$Package$$Service$/$Method$\",\n");
+ }
+ printer->Print(*vars, "};\n\n");
}
- printer->Print(*vars, "};\n\n");
printer->Print(*vars,
"std::unique_ptr< $ns$$Service$::Stub> $ns$$Service$::NewStub("
@@ -1272,7 +1274,6 @@ void PrintSourceService(Printer *printer, const Service *service,
printer->Print(*vars, "$ns$$Service$::Service::Service() {\n");
printer->Indent();
- printer->Print(*vars, "(void)$prefix$$Service$_method_names;\n");
for (int i = 0; i < service->method_count(); ++i) {
auto method = service->method(i);
(*vars)["Idx"] = as_string(i);
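For reference, a rough sketch (not part of this commit) of what the guarded block above emits for a service that does have methods; when method_count() is zero, neither the array nor the old "(void)$prefix$$Service$_method_names;" suppression statement is generated, avoiding an empty-array definition and an unused-variable warning. The package, service, and method names below are hypothetical.

/* Illustrative generated output for a hypothetical service "Echo" with one
 * method "Ping" in package "test". */
static const char* Echo_method_names[] = {
    "/test.Echo/Ping",
};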
diff --git a/src/core/ext/census/gen/census.pb.h b/src/core/ext/census/gen/census.pb.h
index dae583f33d..c8546eac2e 100644
--- a/src/core/ext/census/gen/census.pb.h
+++ b/src/core/ext/census/gen/census.pb.h
@@ -292,4 +292,4 @@ extern const pb_field_t google_census_Metric_fields[5];
} /* extern "C" */
#endif
-#endif
+#endif /* GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H */
diff --git a/src/core/ext/census/gen/trace_context.pb.h b/src/core/ext/census/gen/trace_context.pb.h
index 263c4c58cb..cfb2f04ccd 100644
--- a/src/core/ext/census/gen/trace_context.pb.h
+++ b/src/core/ext/census/gen/trace_context.pb.h
@@ -96,4 +96,4 @@ extern const pb_field_t google_trace_TraceContext_fields[4];
} /* extern "C" */
#endif
-#endif
+#endif /* GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H */
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index 8e4d4732b8..65cfe1fa90 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -67,9 +67,7 @@ static void extract_and_annotate_method_tag(grpc_metadata_batch *md,
channel_data *chand) {
grpc_linked_mdelem *m;
for (m = md->list.head; m != NULL; m = m->next) {
- if (m->md->key == GRPC_MDSTR_PATH) {
- gpr_log(GPR_DEBUG, "%s",
- (const char *)GRPC_SLICE_START_PTR(m->md->value->slice));
+ if (grpc_slice_eq(GRPC_MDKEY(m->md), GRPC_MDSTR_PATH)) {
/* Add method tag here */
}
}
diff --git a/src/core/ext/census/tracing.c b/src/core/ext/census/tracing.c
index 3b5d6dab2b..9371fffc8d 100644
--- a/src/core/ext/census/tracing.c
+++ b/src/core/ext/census/tracing.c
@@ -31,15 +31,21 @@
*
*/
+//#include "src/core/ext/census/tracing.h"
+
#include <grpc/census.h>
+#include <stdlib.h>
/* TODO(aveitch): These are all placeholder implementations. */
int census_trace_mask(const census_context *context) {
+ abort();
return CENSUS_TRACE_MASK_NONE;
}
-void census_set_trace_mask(int trace_mask) {}
+void census_set_trace_mask(int trace_mask) { abort(); }
void census_trace_print(census_context *context, uint32_t type,
- const char *buffer, size_t n) {}
+ const char *buffer, size_t n) {
+ abort();
+}
diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c
index 2f25fef9a7..74350d9fee 100644
--- a/src/core/ext/client_channel/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -52,6 +52,7 @@
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
@@ -87,7 +88,7 @@ static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *p) {
gpr_free(p);
}
-static const grpc_mdstr_hash_table_vtable method_parameters_vtable = {
+static const grpc_slice_hash_table_vtable method_parameters_vtable = {
method_parameters_free, method_parameters_copy};
static void *method_parameters_create_from_json(const grpc_json *json) {
@@ -165,7 +166,7 @@ typedef struct client_channel_channel_data {
/** service config in JSON form */
char *service_config_json;
/** maps method names to method_parameters structs */
- grpc_mdstr_hash_table *method_params_table;
+ grpc_slice_hash_table *method_params_table;
/** incoming resolver result - set by resolver.next() */
grpc_channel_args *resolver_result;
/** a list of closures that are all waiting for config to come in */
@@ -267,7 +268,7 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
char *lb_policy_name = NULL;
grpc_lb_policy *lb_policy = NULL;
grpc_lb_policy *old_lb_policy;
- grpc_mdstr_hash_table *method_params_table = NULL;
+ grpc_slice_hash_table *method_params_table = NULL;
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
bool exit_idle = false;
grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");
@@ -362,7 +363,7 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
chand->service_config_json = service_config_json;
}
if (chand->method_params_table != NULL) {
- grpc_mdstr_hash_table_unref(exec_ctx, chand->method_params_table);
+ grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
chand->method_params_table = method_params_table;
if (lb_policy != NULL) {
@@ -558,7 +559,7 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
gpr_free(chand->lb_policy_name);
gpr_free(chand->service_config_json);
if (chand->method_params_table != NULL) {
- grpc_mdstr_hash_table_unref(exec_ctx, chand->method_params_table);
+ grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(chand->interested_parties);
@@ -593,7 +594,7 @@ typedef struct client_channel_call_data {
// to avoid this without breaking the grpc_deadline_state abstraction.
grpc_deadline_state deadline_state;
- grpc_mdstr *path; // Request path.
+ grpc_slice path; // Request path.
gpr_timespec call_start_time;
gpr_timespec deadline;
wait_for_ready_value wait_for_ready_from_service_config;
@@ -997,10 +998,10 @@ static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
if (error == GRPC_ERROR_NONE) {
// Get the method config table from channel data.
gpr_mu_lock(&chand->mu);
- grpc_mdstr_hash_table *method_params_table = NULL;
+ grpc_slice_hash_table *method_params_table = NULL;
if (chand->method_params_table != NULL) {
method_params_table =
- grpc_mdstr_hash_table_ref(chand->method_params_table);
+ grpc_slice_hash_table_ref(chand->method_params_table);
}
gpr_mu_unlock(&chand->mu);
// If the method config table was present, use it.
@@ -1029,7 +1030,7 @@ static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&calld->mu);
}
}
- grpc_mdstr_hash_table_unref(exec_ctx, method_params_table);
+ grpc_slice_hash_table_unref(exec_ctx, method_params_table);
}
}
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config");
@@ -1043,7 +1044,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
call_data *calld = elem->call_data;
// Initialize data members.
grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
- calld->path = GRPC_MDSTR_REF(args->path);
+ calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
@@ -1067,8 +1068,8 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
if (chand->lb_policy != NULL) {
// We already have a resolver result, so check for service config.
if (chand->method_params_table != NULL) {
- grpc_mdstr_hash_table *method_params_table =
- grpc_mdstr_hash_table_ref(chand->method_params_table);
+ grpc_slice_hash_table *method_params_table =
+ grpc_slice_hash_table_ref(chand->method_params_table);
gpr_mu_unlock(&chand->mu);
method_parameters *method_params = grpc_method_config_table_get(
exec_ctx, method_params_table, args->path);
@@ -1084,7 +1085,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
method_params->wait_for_ready;
}
}
- grpc_mdstr_hash_table_unref(exec_ctx, method_params_table);
+ grpc_slice_hash_table_unref(exec_ctx, method_params_table);
} else {
gpr_mu_unlock(&chand->mu);
}
@@ -1113,7 +1114,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
void *and_free_memory) {
call_data *calld = elem->call_data;
grpc_deadline_state_destroy(exec_ctx, elem);
- GRPC_MDSTR_UNREF(exec_ctx, calld->path);
+ grpc_slice_unref_internal(exec_ctx, calld->path);
GRPC_ERROR_UNREF(calld->cancel_error);
grpc_subchannel_call *call = GET_CALL(calld);
if (call != NULL && call != CANCELLED_CALL) {
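Putting the hunks above together, a condensed sketch of the ref/lookup/unref pattern now used with the renamed table type (names taken from this diff; locking as in cc_init_call_elem, error handling omitted, and the table assumed non-NULL):

gpr_mu_lock(&chand->mu);
grpc_slice_hash_table *table =
    grpc_slice_hash_table_ref(chand->method_params_table);
gpr_mu_unlock(&chand->mu);
/* calld->path is now a grpc_slice rather than a grpc_mdstr* */
method_parameters *method_params =
    grpc_method_config_table_get(exec_ctx, table, calld->path);
if (method_params != NULL) {
  /* apply method_params->wait_for_ready and the other per-method settings */
}
grpc_slice_hash_table_unref(exec_ctx, table);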
diff --git a/src/core/ext/client_channel/connector.h b/src/core/ext/client_channel/connector.h
index 3de061620e..395f89b3b2 100644
--- a/src/core/ext/client_channel/connector.h
+++ b/src/core/ext/client_channel/connector.h
@@ -48,9 +48,6 @@ struct grpc_connector {
typedef struct {
/** set of pollsets interested in this connection */
grpc_pollset_set *interested_parties;
- /** address to connect to */
- const grpc_resolved_address *addr;
- size_t addr_len;
/** initial connect string to send */
grpc_slice initial_connect_string;
/** deadline for connection */
diff --git a/src/core/ext/client_channel/resolver_registry.h b/src/core/ext/client_channel/resolver_registry.h
index 4fb16131db..a4606463eb 100644
--- a/src/core/ext/client_channel/resolver_registry.h
+++ b/src/core/ext/client_channel/resolver_registry.h
@@ -60,7 +60,9 @@ void grpc_register_resolver_type(grpc_resolver_factory *factory);
return it.
If a resolver factory was not found, return NULL.
\a args is a set of channel arguments to be included in the result
- (typically the set of arguments passed in from the client API). */
+ (typically the set of arguments passed in from the client API).
+ \a pollset_set is used to drive IO in the name resolution process, it
+ should not be NULL. */
grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
const grpc_channel_args *args,
grpc_pollset_set *pollset_set);
diff --git a/src/core/ext/client_channel/subchannel.c b/src/core/ext/client_channel/subchannel.c
index 1bac82b451..b7379b30b3 100644
--- a/src/core/ext/client_channel/subchannel.c
+++ b/src/core/ext/client_channel/subchannel.c
@@ -38,12 +38,16 @@
#include <grpc/support/alloc.h>
#include <grpc/support/avl.h>
+#include <grpc/support/string_util.h>
#include "src/core/ext/client_channel/client_channel.h"
#include "src/core/ext/client_channel/initial_connect_string.h"
+#include "src/core/ext/client_channel/parse_address.h"
#include "src/core/ext/client_channel/subchannel_index.h"
+#include "src/core/ext/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -95,8 +99,6 @@ struct grpc_subchannel {
size_t num_filters;
/** channel arguments */
grpc_channel_args *args;
- /** address to connect to */
- grpc_resolved_address *addr;
grpc_subchannel_key *key;
@@ -211,7 +213,6 @@ static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_subchannel *c = arg;
gpr_free((void *)c->filters);
grpc_channel_args_destroy(exec_ctx, c->args);
- gpr_free(c->addr);
grpc_slice_unref_internal(exec_ctx, c->initial_connect_string);
grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
grpc_connector_unref(exec_ctx, c->connector);
@@ -327,12 +328,17 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
} else {
c->filters = NULL;
}
- c->addr = gpr_malloc(sizeof(grpc_resolved_address));
- if (args->addr->len)
- memcpy(c->addr, args->addr, sizeof(grpc_resolved_address));
c->pollset_set = grpc_pollset_set_create();
- grpc_set_initial_connect_string(&c->addr, &c->initial_connect_string);
- c->args = grpc_channel_args_copy(args->args);
+ grpc_resolved_address *addr = gpr_malloc(sizeof(*addr));
+ grpc_get_subchannel_address_arg(args->args, addr);
+ grpc_set_initial_connect_string(&addr, &c->initial_connect_string);
+ static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
+ grpc_arg new_arg = grpc_create_subchannel_address_arg(addr);
+ gpr_free(addr);
+ c->args = grpc_channel_args_copy_and_add_and_remove(
+ args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &new_arg, 1);
+ gpr_free(new_arg.value.string);
+
c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
&c->root_external_state_watcher;
grpc_closure_init(&c->connected, subchannel_connected, c,
@@ -385,7 +391,6 @@ static void continue_connect_locked(grpc_exec_ctx *exec_ctx,
grpc_connect_in_args args;
args.interested_parties = c->pollset_set;
- args.addr = c->addr;
args.deadline = c->next_attempt;
args.channel_args = c->args;
args.initial_connect_string = c->initial_connect_string;
@@ -620,9 +625,8 @@ static void publish_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_error *error = grpc_channel_stack_builder_finish(
exec_ctx, builder, 0, 1, connection_destroy, NULL, (void **)&con);
if (error != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(error);
- gpr_log(GPR_ERROR, "error initializing subchannel stack: %s", msg);
- grpc_error_free_string(msg);
+ gpr_log(GPR_ERROR, "error initializing subchannel stack: %s",
+ grpc_error_string(error));
GRPC_ERROR_UNREF(error);
abort(); /* TODO(ctiller): what to do here? */
}
@@ -687,7 +691,6 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
const char *errmsg = grpc_error_string(error);
gpr_log(GPR_INFO, "Connect failed: %s", errmsg);
- grpc_error_free_string(errmsg);
maybe_start_connecting_locked(exec_ctx, c);
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
@@ -746,7 +749,7 @@ grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
- grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec start_time,
+ grpc_polling_entity *pollent, grpc_slice path, gpr_timespec start_time,
gpr_timespec deadline, grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
*call = gpr_malloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
@@ -758,7 +761,7 @@ grpc_error *grpc_connected_subchannel_create_call(
if (error != GRPC_ERROR_NONE) {
const char *error_string = grpc_error_string(error);
gpr_log(GPR_ERROR, "error: %s", error_string);
- grpc_error_free_string(error_string);
+
gpr_free(*call);
return error;
}
@@ -771,3 +774,37 @@ grpc_call_stack *grpc_subchannel_call_get_call_stack(
grpc_subchannel_call *subchannel_call) {
return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call);
}
+
+static void grpc_uri_to_sockaddr(char *uri_str, grpc_resolved_address *addr) {
+ grpc_uri *uri = grpc_uri_parse(uri_str, 0 /* suppress_errors */);
+ GPR_ASSERT(uri != NULL);
+ if (strcmp(uri->scheme, "ipv4") == 0) {
+ GPR_ASSERT(parse_ipv4(uri, addr));
+ } else if (strcmp(uri->scheme, "ipv6") == 0) {
+ GPR_ASSERT(parse_ipv6(uri, addr));
+ } else {
+ GPR_ASSERT(parse_unix(uri, addr));
+ }
+ grpc_uri_destroy(uri);
+}
+
+void grpc_get_subchannel_address_arg(const grpc_channel_args *args,
+ grpc_resolved_address *addr) {
+ const grpc_arg *addr_arg =
+ grpc_channel_args_find(args, GRPC_ARG_SUBCHANNEL_ADDRESS);
+ GPR_ASSERT(addr_arg != NULL); // Should have been set by LB policy.
+ GPR_ASSERT(addr_arg->type == GRPC_ARG_STRING);
+ memset(addr, 0, sizeof(*addr));
+ if (*addr_arg->value.string != '\0') {
+ grpc_uri_to_sockaddr(addr_arg->value.string, addr);
+ }
+}
+
+grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr) {
+ grpc_arg new_arg;
+ new_arg.key = GRPC_ARG_SUBCHANNEL_ADDRESS;
+ new_arg.type = GRPC_ARG_STRING;
+ new_arg.value.string =
+ addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup("");
+ return new_arg;
+}
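A sketch of the round trip these new helpers enable, mirroring the pick_first, round_robin, and chttp2_connector hunks later in this diff: the LB policy encodes the resolved address as a string channel arg, and the connector decodes it back into a grpc_resolved_address.

/* LB policy side: attach the address to the channel args */
grpc_arg addr_arg = grpc_create_subchannel_address_arg(&address);
grpc_channel_args *new_args =
    grpc_channel_args_copy_and_add(args->args, &addr_arg, 1);
gpr_free(addr_arg.value.string); /* new_args holds its own copy */

/* connector side: recover the address from the args */
grpc_resolved_address addr;
grpc_get_subchannel_address_arg(new_args, &addr); /* parses the ipv4:/ipv6:/unix: URI */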
diff --git a/src/core/ext/client_channel/subchannel.h b/src/core/ext/client_channel/subchannel.h
index 24aa9f73dc..9bd35a7704 100644
--- a/src/core/ext/client_channel/subchannel.h
+++ b/src/core/ext/client_channel/subchannel.h
@@ -40,6 +40,9 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h"
+// Channel arg containing a grpc_resolved_address to connect to.
+#define GRPC_ARG_SUBCHANNEL_ADDRESS "grpc.subchannel_address"
+
/** A (sub-)channel that knows how to connect to exactly one target
address. Provides a target for load balancing. */
typedef struct grpc_subchannel grpc_subchannel;
@@ -111,7 +114,7 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
/** construct a subchannel call */
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
- grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec start_time,
+ grpc_polling_entity *pollent, grpc_slice path, gpr_timespec start_time,
gpr_timespec deadline, grpc_subchannel_call **subchannel_call);
/** process a transport level op */
@@ -164,8 +167,6 @@ struct grpc_subchannel_args {
size_t filter_count;
/** Channel arguments to be supplied to the newly created channel */
const grpc_channel_args *args;
- /** Address to connect to */
- grpc_resolved_address *addr;
};
/** create a subchannel given a connector */
@@ -173,4 +174,12 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
grpc_connector *connector,
const grpc_subchannel_args *args);
+/// Sets \a addr from \a args.
+void grpc_get_subchannel_address_arg(const grpc_channel_args *args,
+ grpc_resolved_address *addr);
+
+/// Returns a new channel arg encoding the subchannel address as a string.
+/// Caller is responsible for freeing the string.
+grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr);
+
#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_SUBCHANNEL_H */
diff --git a/src/core/ext/client_channel/subchannel_index.c b/src/core/ext/client_channel/subchannel_index.c
index 1ebe03ef11..11889300a2 100644
--- a/src/core/ext/client_channel/subchannel_index.c
+++ b/src/core/ext/client_channel/subchannel_index.c
@@ -86,11 +86,6 @@ static grpc_subchannel_key *create_key(
} else {
k->args.filters = NULL;
}
- k->args.addr = gpr_malloc(sizeof(grpc_resolved_address));
- k->args.addr->len = args->addr->len;
- if (k->args.addr->len > 0) {
- memcpy(k->args.addr, args->addr, sizeof(grpc_resolved_address));
- }
k->args.args = copy_channel_args(args->args);
return k;
}
@@ -108,14 +103,8 @@ static int subchannel_key_compare(grpc_subchannel_key *a,
grpc_subchannel_key *b) {
int c = GPR_ICMP(a->connector, b->connector);
if (c != 0) return c;
- c = GPR_ICMP(a->args.addr->len, b->args.addr->len);
- if (c != 0) return c;
c = GPR_ICMP(a->args.filter_count, b->args.filter_count);
if (c != 0) return c;
- if (a->args.addr->len) {
- c = memcmp(a->args.addr->addr, b->args.addr->addr, a->args.addr->len);
- if (c != 0) return c;
- }
if (a->args.filter_count > 0) {
c = memcmp(a->args.filters, b->args.filters,
a->args.filter_count * sizeof(*a->args.filters));
@@ -129,7 +118,6 @@ void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
grpc_connector_unref(exec_ctx, k->connector);
gpr_free((grpc_channel_args *)k->args.filters);
grpc_channel_args_destroy(exec_ctx, (grpc_channel_args *)k->args.args);
- gpr_free(k->args.addr);
gpr_free(k);
}
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index 97f98df03a..ded457f64a 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -135,13 +135,13 @@ int grpc_lb_glb_trace = 0;
/* add lb_token of selected subchannel (address) to the call's initial
* metadata */
-static void initial_metadata_add_lb_token(
- grpc_metadata_batch *initial_metadata,
- grpc_linked_mdelem *lb_token_mdelem_storage, grpc_mdelem *lb_token) {
+static grpc_error *initial_metadata_add_lb_token(
+ grpc_exec_ctx *exec_ctx, grpc_metadata_batch *initial_metadata,
+ grpc_linked_mdelem *lb_token_mdelem_storage, grpc_mdelem lb_token) {
GPR_ASSERT(lb_token_mdelem_storage != NULL);
- GPR_ASSERT(lb_token != NULL);
- grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
- lb_token);
+ GPR_ASSERT(!GRPC_MDISNULL(lb_token));
+ return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
+ lb_token_mdelem_storage, lb_token);
}
typedef struct wrapped_rr_closure_arg {
@@ -161,7 +161,7 @@ typedef struct wrapped_rr_closure_arg {
grpc_connected_subchannel **target;
/* the LB token associated with the pick */
- grpc_mdelem *lb_token;
+ grpc_mdelem lb_token;
/* storage for the lb token initial metadata mdelem */
grpc_linked_mdelem *lb_token_mdelem_storage;
@@ -188,8 +188,8 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
* addresses failed to connect). There won't be any user_data/token
* available */
if (*wc_arg->target != NULL) {
- if (wc_arg->lb_token != NULL) {
- initial_metadata_add_lb_token(wc_arg->initial_metadata,
+ if (!GRPC_MDISNULL(wc_arg->lb_token)) {
+ initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
wc_arg->lb_token_mdelem_storage,
GRPC_MDELEM_REF(wc_arg->lb_token));
} else {
@@ -345,8 +345,7 @@ typedef struct glb_lb_policy {
/* call status code and details, set in lb_on_server_status_received() */
grpc_status_code lb_call_status;
- char *lb_call_status_details;
- size_t lb_call_status_details_capacity;
+ grpc_slice lb_call_status_details;
/** LB call retry backoff state */
gpr_backoff lb_call_backoff_state;
@@ -388,10 +387,14 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
/* vtable for LB tokens in grpc_lb_addresses. */
static void *lb_token_copy(void *token) {
- return token == NULL ? NULL : GRPC_MDELEM_REF(token);
+ return token == NULL
+ ? NULL
+ : (void *)GRPC_MDELEM_REF((grpc_mdelem){(uintptr_t)token}).payload;
}
static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) {
- if (token != NULL) GRPC_MDELEM_UNREF(exec_ctx, token);
+ if (token != NULL) {
+ GRPC_MDELEM_UNREF(exec_ctx, (grpc_mdelem){(uintptr_t)token});
+ }
}
static int lb_token_cmp(void *token1, void *token2) {
if (token1 > token2) return 1;
@@ -459,10 +462,11 @@ static grpc_lb_addresses *process_serverlist_locked(
GPR_ARRAY_SIZE(server->load_balance_token);
const size_t lb_token_length =
strnlen(server->load_balance_token, lb_token_max_length);
- grpc_mdstr *lb_token_mdstr = grpc_mdstr_from_buffer(
- (uint8_t *)server->load_balance_token, lb_token_length);
- user_data = grpc_mdelem_from_metadata_strings(
- exec_ctx, GRPC_MDSTR_LB_TOKEN, lb_token_mdstr);
+ grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
+ server->load_balance_token, lb_token_length);
+ user_data = (void *)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
+ lb_token_mdstr)
+ .payload;
} else {
char *uri = grpc_sockaddr_to_uri(&addr);
gpr_log(GPR_INFO,
@@ -470,7 +474,7 @@ static grpc_lb_addresses *process_serverlist_locked(
"be used instead",
uri);
gpr_free(uri);
- user_data = GRPC_MDELEM_LB_TOKEN_EMPTY;
+ user_data = (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
}
grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
@@ -564,7 +568,7 @@ static bool pick_from_internal_rr_locked(
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
/* add the load reporting initial metadata */
- initial_metadata_add_lb_token(pick_args->initial_metadata,
+ initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
pick_args->lb_token_mdelem_storage,
GRPC_MDELEM_REF(wc_arg->lb_token));
@@ -1103,11 +1107,12 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
/* Note the following LB call progresses every time there's activity in \a
* glb_policy->base.interested_parties, which is comprised of the polling
* entities from \a client_channel. */
+ grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
glb_policy->lb_call = grpc_channel_create_pollset_set_call(
exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
- "/grpc.lb.v1.LoadBalancer/BalanceLoad", glb_policy->server_name,
- glb_policy->deadline, NULL);
+ GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
+ &host, glb_policy->deadline, NULL);
grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);
@@ -1120,9 +1125,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
- glb_policy->lb_call_status_details = NULL;
- glb_policy->lb_call_status_details_capacity = 0;
-
grpc_closure_init(&glb_policy->lb_on_server_status_received,
lb_on_server_status_received, glb_policy,
grpc_schedule_on_exec_ctx);
@@ -1138,7 +1140,8 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
}
-static void lb_call_destroy_locked(glb_lb_policy *glb_policy) {
+static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_call_destroy(glb_policy->lb_call);
glb_policy->lb_call = NULL;
@@ -1147,7 +1150,7 @@ static void lb_call_destroy_locked(glb_lb_policy *glb_policy) {
grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
- gpr_free(glb_policy->lb_call_status_details);
+ grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
}
/*
@@ -1196,8 +1199,6 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
op->data.recv_status_on_client.status_details =
&glb_policy->lb_call_status_details;
- op->data.recv_status_on_client.status_details_capacity =
- &glb_policy->lb_call_status_details_capacity;
op->flags = 0;
op->reserved = NULL;
op++;
@@ -1340,15 +1341,18 @@ static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(glb_policy->lb_call != NULL);
if (grpc_lb_glb_trace) {
+ char *status_details =
+ grpc_slice_to_c_string(glb_policy->lb_call_status_details);
gpr_log(GPR_DEBUG,
"Status from LB server received. Status = %d, Details = '%s', "
"(call: %p)",
- glb_policy->lb_call_status, glb_policy->lb_call_status_details,
+ glb_policy->lb_call_status, status_details,
(void *)glb_policy->lb_call);
+ gpr_free(status_details);
}
- /* We need to performe cleanups no matter what. */
- lb_call_destroy_locked(glb_policy);
+ /* We need to perform cleanups no matter what. */
+ lb_call_destroy_locked(exec_ctx, glb_policy);
if (!glb_policy->shutting_down) {
/* if we aren't shutting down, restart the LB client call after some time */
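A sketch of the token-as-user_data conversion used throughout this file now that grpc_mdelem is passed by value and wraps a uintptr_t payload (the same casts as in the lb_token_copy/lb_token_destroy and process_serverlist_locked hunks above):

grpc_mdelem token =
    grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN, lb_token_mdstr);
void *user_data = (void *)token.payload;          /* stored in grpc_lb_addresses */
grpc_mdelem recovered = (grpc_mdelem){(uintptr_t)user_data}; /* recovered later */
GRPC_MDELEM_UNREF(exec_ctx, recovered);           /* drops the ref taken above */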
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
index 821becff69..9f2aa461be 100644
--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -36,7 +36,9 @@
#include <grpc/support/alloc.h>
#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
typedef struct pending_pick {
@@ -466,11 +468,15 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
}
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
- sc_args.addr = &addresses->addresses[i].address;
- sc_args.args = args->args;
-
+ grpc_arg addr_arg =
+ grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
+ grpc_channel_args *new_args =
+ grpc_channel_args_copy_and_add(args->args, &addr_arg, 1);
+ gpr_free(addr_arg.value.string);
+ sc_args.args = new_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
+ grpc_channel_args_destroy(exec_ctx, new_args);
if (subchannel != NULL) {
p->subchannels[subchannel_idx++] = subchannel;
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index cb679489c3..d17d8fa057 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -64,8 +64,10 @@
#include <grpc/support/alloc.h>
#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/static_metadata.h"
@@ -729,11 +731,15 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
if (addresses->addresses[i].is_balancer) continue;
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
- sc_args.addr = &addresses->addresses[i].address;
- sc_args.args = args->args;
-
+ grpc_arg addr_arg =
+ grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
+ grpc_channel_args *new_args =
+ grpc_channel_args_copy_and_add(args->args, &addr_arg, 1);
+ gpr_free(addr_arg.value.string);
+ sc_args.args = new_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
+ grpc_channel_args_destroy(exec_ctx, new_args);
if (subchannel != NULL) {
subchannel_data *sd = gpr_malloc(sizeof(*sd));
diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c
index 07ef10e6a8..8af6191c3b 100644
--- a/src/core/ext/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/load_reporting/load_reporting_filter.c
@@ -41,13 +41,17 @@
#include "src/core/ext/load_reporting/load_reporting_filter.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/static_metadata.h"
typedef struct call_data {
intptr_t id; /**< an id unique to the call */
- char *trailing_md_string;
- char *initial_md_string;
- const char *service_method;
+ bool have_trailing_md_string;
+ grpc_slice trailing_md_string;
+ bool have_initial_md_string;
+ grpc_slice initial_md_string;
+ bool have_service_method;
+ grpc_slice service_method;
/* stores the recv_initial_metadata op's ready closure, which we wrap with our
* own (on_initial_md_ready) in order to capture the incoming initial metadata
@@ -63,42 +67,28 @@ typedef struct channel_data {
intptr_t id; /**< an id unique to the channel */
} channel_data;
-typedef struct {
- grpc_call_element *elem;
- grpc_exec_ctx *exec_ctx;
-} recv_md_filter_args;
-
-static grpc_mdelem *recv_md_filter(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_mdelem *md) {
- recv_md_filter_args *a = user_data;
- grpc_call_element *elem = a->elem;
- call_data *calld = elem->call_data;
-
- if (md->key == GRPC_MDSTR_PATH) {
- calld->service_method = grpc_mdstr_as_c_string(md->value);
- } else if (md->key == GRPC_MDSTR_LB_TOKEN) {
- calld->initial_md_string = gpr_strdup(grpc_mdstr_as_c_string(md->value));
- return NULL;
- }
-
- return md;
-}
-
static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
if (err == GRPC_ERROR_NONE) {
- recv_md_filter_args a;
- a.elem = elem;
- a.exec_ctx = exec_ctx;
- grpc_metadata_batch_filter(exec_ctx, calld->recv_initial_metadata,
- recv_md_filter, &a);
- if (calld->service_method == NULL) {
+ if (calld->recv_initial_metadata->idx.named.path != NULL) {
+ calld->service_method = grpc_slice_ref_internal(
+ GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.path->md));
+ calld->have_service_method = true;
+ } else {
err =
grpc_error_add_child(err, GRPC_ERROR_CREATE("Missing :path header"));
}
+ if (calld->recv_initial_metadata->idx.named.lb_token != NULL) {
+ calld->initial_md_string = grpc_slice_ref_internal(
+ GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.lb_token->md));
+ calld->have_initial_md_string = true;
+ grpc_metadata_batch_remove(
+ exec_ctx, calld->recv_initial_metadata,
+ calld->recv_initial_metadata->idx.named.lb_token);
+ }
} else {
GRPC_ERROR_REF(err);
}
@@ -149,8 +139,15 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
calld->service_method};
*/
- gpr_free(calld->initial_md_string);
- gpr_free(calld->trailing_md_string);
+ if (calld->have_initial_md_string) {
+ grpc_slice_unref_internal(exec_ctx, calld->initial_md_string);
+ }
+ if (calld->have_trailing_md_string) {
+ grpc_slice_unref_internal(exec_ctx, calld->trailing_md_string);
+ }
+ if (calld->have_service_method) {
+ grpc_slice_unref_internal(exec_ctx, calld->service_method);
+ }
}
/* Constructor for channel_data */
@@ -193,19 +190,6 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
*/
}
-static grpc_mdelem *lr_trailing_md_filter(grpc_exec_ctx *exec_ctx,
- void *user_data, grpc_mdelem *md) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
-
- if (md->key == GRPC_MDSTR_LB_COST_BIN) {
- calld->trailing_md_string = gpr_strdup(grpc_mdstr_as_c_string(md->value));
- return NULL;
- }
-
- return md;
-}
-
static void lr_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
@@ -218,8 +202,14 @@ static void lr_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
calld->ops_recv_initial_metadata_ready = op->recv_initial_metadata_ready;
op->recv_initial_metadata_ready = &calld->on_initial_md_ready;
} else if (op->send_trailing_metadata) {
- grpc_metadata_batch_filter(exec_ctx, op->send_trailing_metadata,
- lr_trailing_md_filter, elem);
+ if (op->send_trailing_metadata->idx.named.lb_cost_bin != NULL) {
+ calld->trailing_md_string = grpc_slice_ref_internal(
+ GRPC_MDVALUE(op->send_trailing_metadata->idx.named.lb_cost_bin->md));
+ calld->have_trailing_md_string = true;
+ grpc_metadata_batch_remove(
+ exec_ctx, op->send_trailing_metadata,
+ op->send_trailing_metadata->idx.named.lb_cost_bin);
+ }
}
grpc_call_next_op(exec_ctx, elem, op);
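A sketch of the direct indexed-metadata access that replaces the grpc_metadata_batch_filter callbacks in this filter (field names as in the hunks above; 'batch' stands in for op->send_trailing_metadata or calld->recv_initial_metadata):

if (batch->idx.named.lb_token != NULL) {
  grpc_slice value =
      grpc_slice_ref_internal(GRPC_MDVALUE(batch->idx.named.lb_token->md));
  grpc_metadata_batch_remove(exec_ctx, batch, batch->idx.named.lb_token);
  /* keep 'value' and release it with grpc_slice_unref_internal() in
     destroy_call_elem, as above */
}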
diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c
index 655d9dc586..58fe9d3985 100644
--- a/src/core/ext/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/resolver/dns/native/dns_resolver.c
@@ -188,9 +188,8 @@ static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
gpr_timespec timeout = gpr_time_sub(next_try, now);
- const char *msg = grpc_error_string(error);
- gpr_log(GPR_INFO, "dns resolution failed (will retry): %s", msg);
- grpc_error_free_string(msg);
+ gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
+ grpc_error_string(error));
GPR_ASSERT(!r->have_retry_timer);
r->have_retry_timer = true;
GRPC_RESOLVER_REF(&r->base, "retry-timer");
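The same simplification appears throughout this commit: grpc_error_string() now returns a string whose lifetime is tied to the error itself (an assumption implied by the removal of every grpc_error_free_string() call in this diff), so a log site reduces to:

gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
        grpc_error_string(error));
/* no grpc_error_free_string(); unreffing the error releases the string */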
diff --git a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
index c146a627cb..a1365f6465 100644
--- a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
@@ -182,7 +182,7 @@ static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
bool errors_found = false;
for (size_t i = 0; i < addresses->num_addresses; i++) {
grpc_uri ith_uri = *args->uri;
- char *part_str = grpc_dump_slice(path_parts.slices[i], GPR_DUMP_ASCII);
+ char *part_str = grpc_slice_to_c_string(path_parts.slices[i]);
ith_uri.path = part_str;
if (!parse(&ith_uri, &addresses->addresses[i].address)) {
errors_found = true; /* GPR_TRUE */
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.c b/src/core/ext/transport/chttp2/client/chttp2_connector.c
index 2c5dfaea60..013c96dc70 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.c
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.c
@@ -43,6 +43,7 @@
#include "src/core/ext/client_channel/connector.h"
#include "src/core/ext/client_channel/http_connect_handshaker.h"
+#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker.h"
@@ -220,6 +221,8 @@ static void chttp2_connector_connect(grpc_exec_ctx *exec_ctx,
grpc_connect_out_args *result,
grpc_closure *notify) {
chttp2_connector *c = (chttp2_connector *)con;
+ grpc_resolved_address addr;
+ grpc_get_subchannel_address_arg(args->channel_args, &addr);
gpr_mu_lock(&c->mu);
GPR_ASSERT(c->notify == NULL);
c->notify = notify;
@@ -231,8 +234,8 @@ static void chttp2_connector_connect(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(!c->connecting);
c->connecting = true;
grpc_tcp_client_connect(exec_ctx, &c->connected, &c->endpoint,
- args->interested_parties, args->channel_args,
- args->addr, args->deadline);
+ args->interested_parties, args->channel_args, &addr,
+ args->deadline);
gpr_mu_unlock(&c->mu);
}
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c
index 574d1a7710..56a1a0de9b 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.c
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.c
@@ -121,7 +121,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
if (error != GRPC_ERROR_NONE || connection_state->server_state->shutdown) {
const char *error_str = grpc_error_string(error);
gpr_log(GPR_ERROR, "Handshaking failed: %s", error_str);
- grpc_error_free_string(error_str);
+
if (error == GRPC_ERROR_NONE && args->endpoint != NULL) {
// We were shut down after handshaking completed successfully, so
// destroy the endpoint here.
@@ -307,7 +307,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
const char *warning_message = grpc_error_string(err);
gpr_log(GPR_INFO, "WARNING: %s", warning_message);
- grpc_error_free_string(warning_message);
+
/* we managed to bind some addresses: continue */
}
grpc_resolved_addresses_destroy(resolved);
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
index bf5026bea6..c219a7d85f 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
@@ -51,7 +51,7 @@ int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
if (err != GRPC_ERROR_NONE) {
const char *msg = grpc_error_string(err);
gpr_log(GPR_ERROR, "%s", msg);
- grpc_error_free_string(msg);
+
GRPC_ERROR_UNREF(err);
}
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
index 395c79a71d..cb2b3f5502 100644
--- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
@@ -94,7 +94,7 @@ done:
if (err != GRPC_ERROR_NONE) {
const char *msg = grpc_error_string(err);
gpr_log(GPR_ERROR, "%s", msg);
- grpc_error_free_string(msg);
+
GRPC_ERROR_UNREF(err);
}
return port_num;
diff --git a/src/core/ext/transport/chttp2/transport/bin_decoder.c b/src/core/ext/transport/chttp2/transport/bin_decoder.c
index 8db36e4a7f..8c87de112e 100644
--- a/src/core/ext/transport/chttp2/transport/bin_decoder.c
+++ b/src/core/ext/transport/chttp2/transport/bin_decoder.c
@@ -157,7 +157,7 @@ grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx *exec_ctx,
"grpc_chttp2_base64_decode has a length of %d, which is not a "
"multiple of 4.\n",
(int)input_length);
- return gpr_empty_slice();
+ return grpc_empty_slice();
}
if (input_length > 0) {
@@ -178,11 +178,11 @@ grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx *exec_ctx,
ctx.contains_tail = false;
if (!grpc_base64_decode_partial(&ctx)) {
- char *s = grpc_dump_slice(input, GPR_DUMP_ASCII);
+ char *s = grpc_slice_to_c_string(input);
gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s);
gpr_free(s);
grpc_slice_unref_internal(exec_ctx, output);
- return gpr_empty_slice();
+ return grpc_empty_slice();
}
GPR_ASSERT(ctx.output_cur == GRPC_SLICE_END_PTR(output));
GPR_ASSERT(ctx.input_cur == GRPC_SLICE_END_PTR(input));
@@ -204,7 +204,7 @@ grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx *exec_ctx,
"has a tail of 1 byte.\n",
(int)input_length);
grpc_slice_unref_internal(exec_ctx, output);
- return gpr_empty_slice();
+ return grpc_empty_slice();
}
if (output_length > input_length / 4 * 3 + tail_xtra[input_length % 4]) {
@@ -214,7 +214,7 @@ grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx *exec_ctx,
(int)output_length,
(int)(input_length / 4 * 3 + tail_xtra[input_length % 4]));
grpc_slice_unref_internal(exec_ctx, output);
- return gpr_empty_slice();
+ return grpc_empty_slice();
}
ctx.input_cur = GRPC_SLICE_START_PTR(input);
@@ -224,11 +224,11 @@ grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx *exec_ctx,
ctx.contains_tail = true;
if (!grpc_base64_decode_partial(&ctx)) {
- char *s = grpc_dump_slice(input, GPR_DUMP_ASCII);
+ char *s = grpc_slice_to_c_string(input);
gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s);
gpr_free(s);
grpc_slice_unref_internal(exec_ctx, output);
- return gpr_empty_slice();
+ return grpc_empty_slice();
}
GPR_ASSERT(ctx.output_cur == GRPC_SLICE_END_PTR(output));
GPR_ASSERT(ctx.input_cur <= GRPC_SLICE_END_PTR(input));
diff --git a/src/core/ext/transport/chttp2/transport/bin_encoder.c b/src/core/ext/transport/chttp2/transport/bin_encoder.c
index af25a4352a..e301c073f3 100644
--- a/src/core/ext/transport/chttp2/transport/bin_encoder.c
+++ b/src/core/ext/transport/chttp2/transport/bin_encoder.c
@@ -177,8 +177,7 @@ static void enc_add1(huff_out *out, uint8_t a) {
enc_flush_some(out);
}
-grpc_slice grpc_chttp2_base64_encode_and_huffman_compress_impl(
- grpc_slice input) {
+grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input) {
size_t input_length = GRPC_SLICE_LENGTH(input);
size_t input_triplets = input_length / 3;
size_t tail_case = input_length % 3;
diff --git a/src/core/ext/transport/chttp2/transport/bin_encoder.h b/src/core/ext/transport/chttp2/transport/bin_encoder.h
index 477559d0e2..0f899c8e34 100644
--- a/src/core/ext/transport/chttp2/transport/bin_encoder.h
+++ b/src/core/ext/transport/chttp2/transport/bin_encoder.h
@@ -49,7 +49,6 @@ grpc_slice grpc_chttp2_huffman_compress(grpc_slice input);
grpc_slice y = grpc_chttp2_huffman_compress(x);
grpc_slice_unref_internal(exec_ctx, x);
return y; */
-grpc_slice grpc_chttp2_base64_encode_and_huffman_compress_impl(
- grpc_slice input);
+grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_ENCODER_H */
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
index bd87253ed3..59b21e3330 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
@@ -31,14 +31,11 @@
*
*/
-#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/metadata.h"
void grpc_chttp2_plugin_init(void) {
- grpc_chttp2_base64_encode_and_huffman_compress =
- grpc_chttp2_base64_encode_and_huffman_compress_impl;
grpc_register_tracer("http", &grpc_http_trace);
grpc_register_tracer("flowctl", &grpc_flowctl_trace);
}
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index eefe21e4f1..022f97daf7 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -44,9 +44,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
-#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
-#include "src/core/ext/transport/chttp2/transport/status_conversion.h"
#include "src/core/ext/transport/chttp2/transport/varint.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/http/parser.h"
@@ -55,7 +53,10 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
+#include "src/core/lib/transport/error_utils.h"
+#include "src/core/lib/transport/http2_errors.h"
#include "src/core/lib/transport/static_metadata.h"
+#include "src/core/lib/transport/status_conversion.h"
#include "src/core/lib/transport/timeout_encoding.h"
#include "src/core/lib/transport/transport_impl.h"
@@ -441,7 +442,7 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_error_add_child(t->close_transport_on_writes_finished, error);
return;
}
- if (!grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, NULL)) {
+ if (!grpc_error_has_clear_grpc_status(error)) {
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAVAILABLE);
}
@@ -888,7 +889,6 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
(int)(closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT),
(int)(closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT),
desc, errstr);
- grpc_error_free_string(errstr);
}
if (error != GRPC_ERROR_NONE) {
if (closure->error_data.error == GRPC_ERROR_NONE) {
@@ -917,12 +917,9 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
}
static bool contains_non_ok_status(grpc_metadata_batch *batch) {
- grpc_linked_mdelem *l;
- for (l = batch->list.head; l; l = l->next) {
- if (l->md->key == GRPC_MDSTR_GRPC_STATUS &&
- l->md != GRPC_MDELEM_GRPC_STATUS_0) {
- return true;
- }
+ if (batch->idx.named.grpc_status != NULL) {
+ return !grpc_mdelem_eq(batch->idx.named.grpc_status->md,
+ GRPC_MDELEM_GRPC_STATUS_0);
}
return false;
}
@@ -1002,9 +999,12 @@ static void log_metadata(const grpc_metadata_batch *md_batch, uint32_t id,
bool is_client, bool is_initial) {
for (grpc_linked_mdelem *md = md_batch->list.head; md != md_batch->list.tail;
md = md->next) {
+ char *key = grpc_slice_to_c_string(GRPC_MDKEY(md->md));
+ char *value = grpc_slice_to_c_string(GRPC_MDVALUE(md->md));
gpr_log(GPR_INFO, "HTTP:%d:%s:%s: %s: %s", id, is_initial ? "HDR" : "TRL",
- is_client ? "CLI" : "SVR", grpc_mdstr_as_c_string(md->md->key),
- grpc_mdstr_as_c_string(md->md->value));
+ is_client ? "CLI" : "SVR", key, value);
+ gpr_free(key);
+ gpr_free(value);
}
}
@@ -1047,11 +1047,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
if (op->cancel_error != GRPC_ERROR_NONE) {
- grpc_chttp2_cancel_stream(exec_ctx, t, s, GRPC_ERROR_REF(op->cancel_error));
- }
-
- if (op->close_error != GRPC_ERROR_NONE) {
- close_from_api(exec_ctx, t, s, GRPC_ERROR_REF(op->close_error));
+ grpc_chttp2_cancel_stream(exec_ctx, t, s, op->cancel_error);
}
if (op->send_initial_metadata != NULL) {
@@ -1102,8 +1098,9 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
s->send_initial_metadata = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_initial_metadata_finished,
- GRPC_ERROR_CREATE(
- "Attempt to send initial metadata after stream was closed"),
+ GRPC_ERROR_CREATE_REFERENCING(
+ "Attempt to send initial metadata after stream was closed",
+ &s->write_closed_error, 1),
"send_initial_metadata_finished");
}
}
@@ -1115,7 +1112,9 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if (s->write_closed) {
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished,
- GRPC_ERROR_CREATE("Attempt to send message after stream was closed"),
+ GRPC_ERROR_CREATE_REFERENCING(
+ "Attempt to send message after stream was closed",
+ &s->write_closed_error, 1),
"fetching_send_message_finished");
} else {
GPR_ASSERT(s->fetching_send_message == NULL);
@@ -1289,11 +1288,16 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_error_code error, grpc_slice data) {
+ grpc_error *error) {
t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED;
- grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)error, data,
- &t->qbuf);
+ grpc_http2_error_code http_error;
+ const char *msg;
+ grpc_error_get_status(error, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL, &msg,
+ &http_error);
+ grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)http_error,
+ grpc_slice_from_copied_string(msg), &t->qbuf);
grpc_chttp2_initiate_write(exec_ctx, t, false, "goaway_sent");
+ GRPC_ERROR_UNREF(error);
}
static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
@@ -1309,10 +1313,8 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
op->on_connectivity_state_change);
}
- if (op->send_goaway) {
- send_goaway(exec_ctx, t,
- grpc_chttp2_grpc_status_to_http2_error(op->goaway_status),
- grpc_slice_ref_internal(*op->goaway_message));
+ if (op->goaway_error) {
+ send_goaway(exec_ctx, t, op->goaway_error);
}
if (op->set_accept_stream) {
@@ -1373,8 +1375,8 @@ void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
incoming_byte_stream_destroy_locked(exec_ctx, bs, GRPC_ERROR_NONE);
}
}
- grpc_chttp2_incoming_metadata_buffer_publish(&s->metadata_buffer[0],
- s->recv_initial_metadata);
+ grpc_chttp2_incoming_metadata_buffer_publish(
+ exec_ctx, &s->metadata_buffer[0], s->recv_initial_metadata);
null_then_run_closure(exec_ctx, &s->recv_initial_metadata_ready,
GRPC_ERROR_NONE);
}
@@ -1417,8 +1419,8 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
}
if (s->all_incoming_byte_streams_finished &&
s->recv_trailing_metadata_finished != NULL) {
- grpc_chttp2_incoming_metadata_buffer_publish(&s->metadata_buffer[1],
- s->recv_trailing_metadata);
+ grpc_chttp2_incoming_metadata_buffer_publish(
+ exec_ctx, &s->metadata_buffer[1], s->recv_trailing_metadata);
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->recv_trailing_metadata_finished, GRPC_ERROR_NONE,
"recv_trailing_metadata_finished");
@@ -1466,70 +1468,37 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
maybe_start_some_streams(exec_ctx, t);
}
-static void status_codes_from_error(grpc_error *error, gpr_timespec deadline,
- grpc_chttp2_error_code *http2_error,
- grpc_status_code *grpc_status) {
- intptr_t ip_http;
- intptr_t ip_grpc;
- bool have_http =
- grpc_error_get_int(error, GRPC_ERROR_INT_HTTP2_ERROR, &ip_http);
- bool have_grpc =
- grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &ip_grpc);
- if (have_http) {
- *http2_error = (grpc_chttp2_error_code)ip_http;
- } else if (have_grpc) {
- *http2_error =
- grpc_chttp2_grpc_status_to_http2_error((grpc_status_code)ip_grpc);
- } else {
- *http2_error = GRPC_CHTTP2_INTERNAL_ERROR;
- }
- if (have_grpc) {
- *grpc_status = (grpc_status_code)ip_grpc;
- } else if (have_http) {
- *grpc_status = grpc_chttp2_http2_error_to_grpc_status(
- (grpc_chttp2_error_code)ip_http, deadline);
- } else {
- *grpc_status = GRPC_STATUS_INTERNAL;
- }
-}
-
void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
grpc_error *due_to_error) {
- if (!s->read_closed || !s->write_closed) {
- grpc_status_code grpc_status;
- grpc_chttp2_error_code http_error;
- status_codes_from_error(due_to_error, s->deadline, &http_error,
- &grpc_status);
+ if (!t->is_client && !s->sent_trailing_metadata &&
+ grpc_error_has_clear_grpc_status(due_to_error)) {
+ close_from_api(exec_ctx, t, s, due_to_error);
+ return;
+ }
+ if (!s->read_closed || !s->write_closed) {
if (s->id != 0) {
+ grpc_http2_error_code http_error;
+ grpc_error_get_status(due_to_error, s->deadline, NULL, NULL, &http_error);
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error,
&s->stats.outgoing));
grpc_chttp2_initiate_write(exec_ctx, t, false, "rst_stream");
}
-
- const char *msg =
- grpc_error_get_str(due_to_error, GRPC_ERROR_STR_GRPC_MESSAGE);
- bool free_msg = false;
- if (msg == NULL) {
- free_msg = true;
- msg = grpc_error_string(due_to_error);
- }
- grpc_slice msg_slice = grpc_slice_from_copied_string(msg);
- grpc_chttp2_fake_status(exec_ctx, t, s, grpc_status, &msg_slice);
- if (free_msg) grpc_error_free_string(msg);
}
if (due_to_error != GRPC_ERROR_NONE && !s->seen_error) {
s->seen_error = true;
- grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, due_to_error);
}
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, grpc_status_code status,
- grpc_slice *slice) {
+ grpc_chttp2_stream *s, grpc_error *error) {
+ grpc_status_code status;
+ const char *msg;
+ grpc_error_get_status(error, s->deadline, &status, &msg, NULL);
+
if (status != GRPC_STATUS_OK) {
s->seen_error = true;
}
@@ -1543,24 +1512,21 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
s->recv_trailing_metadata_finished != NULL) {
char status_string[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(status, status_string);
- grpc_chttp2_incoming_metadata_buffer_add(
- &s->metadata_buffer[1], grpc_mdelem_from_metadata_strings(
- exec_ctx, GRPC_MDSTR_GRPC_STATUS,
- grpc_mdstr_from_string(status_string)));
- if (slice) {
- grpc_chttp2_incoming_metadata_buffer_add(
- &s->metadata_buffer[1],
- grpc_mdelem_from_metadata_strings(
- exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
- grpc_mdstr_from_slice(exec_ctx,
- grpc_slice_ref_internal(*slice))));
+ grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+ exec_ctx, &s->metadata_buffer[1],
+ grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_STATUS,
+ grpc_slice_from_copied_string(status_string)));
+ if (msg != NULL) {
+ grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+ exec_ctx, &s->metadata_buffer[1],
+ grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
+ grpc_slice_from_copied_string(msg)));
}
s->published_metadata[1] = GRPC_METADATA_SYNTHESIZED_FROM_FAKE;
grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
- if (slice) {
- grpc_slice_unref_internal(exec_ctx, *slice);
- }
+
+ GRPC_ERROR_UNREF(error);
}
static void add_error(grpc_error *error, grpc_error **refs, size_t *nrefs) {
@@ -1626,36 +1592,48 @@ void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
int close_writes, grpc_error *error) {
if (s->read_closed && s->write_closed) {
/* already closed */
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
GRPC_ERROR_UNREF(error);
return;
}
+ bool closed_read = false;
+ bool became_closed = false;
if (close_reads && !s->read_closed) {
s->read_closed_error = GRPC_ERROR_REF(error);
s->read_closed = true;
- for (int i = 0; i < 2; i++) {
- if (s->published_metadata[i] == GRPC_METADATA_NOT_PUBLISHED) {
- s->published_metadata[i] = GPRC_METADATA_PUBLISHED_AT_CLOSE;
- }
- }
- decrement_active_streams_locked(exec_ctx, t, s);
- grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
- grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
- grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
+ closed_read = true;
}
if (close_writes && !s->write_closed) {
s->write_closed_error = GRPC_ERROR_REF(error);
s->write_closed = true;
grpc_chttp2_fail_pending_writes(exec_ctx, t, s, GRPC_ERROR_REF(error));
- grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
if (s->read_closed && s->write_closed) {
+ became_closed = true;
+ grpc_error *overall_error =
+ removal_error(GRPC_ERROR_REF(error), s, "Stream removed");
if (s->id != 0) {
- remove_stream(exec_ctx, t, s->id,
- removal_error(GRPC_ERROR_REF(error), s, "Stream removed"));
+ remove_stream(exec_ctx, t, s->id, GRPC_ERROR_REF(overall_error));
} else {
/* Purge streams waiting on concurrency still waiting for id assignment */
grpc_chttp2_list_remove_waiting_for_concurrency(t, s);
}
+ if (overall_error != GRPC_ERROR_NONE) {
+ grpc_chttp2_fake_status(exec_ctx, t, s, overall_error);
+ }
+ }
+ if (closed_read) {
+ for (int i = 0; i < 2; i++) {
+ if (s->published_metadata[i] == GRPC_METADATA_NOT_PUBLISHED) {
+ s->published_metadata[i] = GPRC_METADATA_PUBLISHED_AT_CLOSE;
+ }
+ }
+ decrement_active_streams_locked(exec_ctx, t, s);
+ grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
+ grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
+ }
+ if (became_closed) {
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2");
}
GRPC_ERROR_UNREF(error);
@@ -1669,112 +1647,92 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint8_t *p;
uint32_t len = 0;
grpc_status_code grpc_status;
- grpc_chttp2_error_code http_error;
- status_codes_from_error(error, s->deadline, &http_error, &grpc_status);
+ const char *msg;
+ grpc_error_get_status(error, s->deadline, &grpc_status, &msg, NULL);
GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100);
- if (s->id != 0 && !t->is_client) {
- /* Hand roll a header block.
- This is unnecessarily ugly - at some point we should find a more
- elegant
- solution.
- It's complicated by the fact that our send machinery would be dead by
- the
- time we got around to sending this, so instead we ignore HPACK
- compression
- and just write the uncompressed bytes onto the wire. */
- status_hdr = grpc_slice_malloc(15 + (grpc_status >= 10));
- p = GRPC_SLICE_START_PTR(status_hdr);
- *p++ = 0x40; /* literal header */
- *p++ = 11; /* len(grpc-status) */
+ /* Hand roll a header block.
+ This is unnecessarily ugly - at some point we should find a more
+ elegant solution.
+ It's complicated by the fact that our send machinery would be dead by
+ the time we got around to sending this, so instead we ignore HPACK
+ compression and just write the uncompressed bytes onto the wire. */
+ status_hdr = grpc_slice_malloc(15 + (grpc_status >= 10));
+ p = GRPC_SLICE_START_PTR(status_hdr);
+ *p++ = 0x00; /* literal header, not indexed */
+ *p++ = 11; /* len(grpc-status) */
+ *p++ = 'g';
+ *p++ = 'r';
+ *p++ = 'p';
+ *p++ = 'c';
+ *p++ = '-';
+ *p++ = 's';
+ *p++ = 't';
+ *p++ = 'a';
+ *p++ = 't';
+ *p++ = 'u';
+ *p++ = 's';
+ if (grpc_status < 10) {
+ *p++ = 1;
+ *p++ = (uint8_t)('0' + grpc_status);
+ } else {
+ *p++ = 2;
+ *p++ = (uint8_t)('0' + (grpc_status / 10));
+ *p++ = (uint8_t)('0' + (grpc_status % 10));
+ }
+ GPR_ASSERT(p == GRPC_SLICE_END_PTR(status_hdr));
+ len += (uint32_t)GRPC_SLICE_LENGTH(status_hdr);
+
+ if (msg != NULL) {
+ size_t msg_len = strlen(msg);
+ GPR_ASSERT(msg_len <= UINT32_MAX);
+ uint32_t msg_len_len = GRPC_CHTTP2_VARINT_LENGTH((uint32_t)msg_len, 0);
+ message_pfx = grpc_slice_malloc(14 + msg_len_len);
+ p = GRPC_SLICE_START_PTR(message_pfx);
+ *p++ = 0x00; /* literal header, not indexed */
+ *p++ = 12; /* len(grpc-message) */
*p++ = 'g';
*p++ = 'r';
*p++ = 'p';
*p++ = 'c';
*p++ = '-';
+ *p++ = 'm';
+ *p++ = 'e';
*p++ = 's';
- *p++ = 't';
- *p++ = 'a';
- *p++ = 't';
- *p++ = 'u';
*p++ = 's';
- if (grpc_status < 10) {
- *p++ = 1;
- *p++ = (uint8_t)('0' + grpc_status);
- } else {
- *p++ = 2;
- *p++ = (uint8_t)('0' + (grpc_status / 10));
- *p++ = (uint8_t)('0' + (grpc_status % 10));
- }
- GPR_ASSERT(p == GRPC_SLICE_END_PTR(status_hdr));
- len += (uint32_t)GRPC_SLICE_LENGTH(status_hdr);
-
- const char *optional_message =
- grpc_error_get_str(error, GRPC_ERROR_STR_GRPC_MESSAGE);
-
- if (optional_message != NULL) {
- size_t msg_len = strlen(optional_message);
- GPR_ASSERT(msg_len <= UINT32_MAX);
- uint32_t msg_len_len = GRPC_CHTTP2_VARINT_LENGTH((uint32_t)msg_len, 0);
- message_pfx = grpc_slice_malloc(14 + msg_len_len);
- p = GRPC_SLICE_START_PTR(message_pfx);
- *p++ = 0x40;
- *p++ = 12; /* len(grpc-message) */
- *p++ = 'g';
- *p++ = 'r';
- *p++ = 'p';
- *p++ = 'c';
- *p++ = '-';
- *p++ = 'm';
- *p++ = 'e';
- *p++ = 's';
- *p++ = 's';
- *p++ = 'a';
- *p++ = 'g';
- *p++ = 'e';
- GRPC_CHTTP2_WRITE_VARINT((uint32_t)msg_len, 0, 0, p,
- (uint32_t)msg_len_len);
- p += msg_len_len;
- GPR_ASSERT(p == GRPC_SLICE_END_PTR(message_pfx));
- len += (uint32_t)GRPC_SLICE_LENGTH(message_pfx);
- len += (uint32_t)msg_len;
- }
-
- hdr = grpc_slice_malloc(9);
- p = GRPC_SLICE_START_PTR(hdr);
- *p++ = (uint8_t)(len >> 16);
- *p++ = (uint8_t)(len >> 8);
- *p++ = (uint8_t)(len);
- *p++ = GRPC_CHTTP2_FRAME_HEADER;
- *p++ = GRPC_CHTTP2_DATA_FLAG_END_STREAM | GRPC_CHTTP2_DATA_FLAG_END_HEADERS;
- *p++ = (uint8_t)(s->id >> 24);
- *p++ = (uint8_t)(s->id >> 16);
- *p++ = (uint8_t)(s->id >> 8);
- *p++ = (uint8_t)(s->id);
- GPR_ASSERT(p == GRPC_SLICE_END_PTR(hdr));
-
- grpc_slice_buffer_add(&t->qbuf, hdr);
- grpc_slice_buffer_add(&t->qbuf, status_hdr);
- if (optional_message) {
- grpc_slice_buffer_add(&t->qbuf, message_pfx);
- grpc_slice_buffer_add(&t->qbuf,
- grpc_slice_from_copied_string(optional_message));
- }
- grpc_slice_buffer_add(
- &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_CHTTP2_NO_ERROR,
- &s->stats.outgoing));
- }
-
- const char *msg = grpc_error_get_str(error, GRPC_ERROR_STR_GRPC_MESSAGE);
- bool free_msg = false;
- if (msg == NULL) {
- free_msg = true;
- msg = grpc_error_string(error);
- }
- grpc_slice msg_slice = grpc_slice_from_copied_string(msg);
- grpc_chttp2_fake_status(exec_ctx, t, s, grpc_status, &msg_slice);
- if (free_msg) grpc_error_free_string(msg);
+ *p++ = 'a';
+ *p++ = 'g';
+ *p++ = 'e';
+ GRPC_CHTTP2_WRITE_VARINT((uint32_t)msg_len, 0, 0, p, (uint32_t)msg_len_len);
+ p += msg_len_len;
+ GPR_ASSERT(p == GRPC_SLICE_END_PTR(message_pfx));
+ len += (uint32_t)GRPC_SLICE_LENGTH(message_pfx);
+ len += (uint32_t)msg_len;
+ }
+
+ hdr = grpc_slice_malloc(9);
+ p = GRPC_SLICE_START_PTR(hdr);
+ *p++ = (uint8_t)(len >> 16);
+ *p++ = (uint8_t)(len >> 8);
+ *p++ = (uint8_t)(len);
+ *p++ = GRPC_CHTTP2_FRAME_HEADER;
+ *p++ = GRPC_CHTTP2_DATA_FLAG_END_STREAM | GRPC_CHTTP2_DATA_FLAG_END_HEADERS;
+ *p++ = (uint8_t)(s->id >> 24);
+ *p++ = (uint8_t)(s->id >> 16);
+ *p++ = (uint8_t)(s->id >> 8);
+ *p++ = (uint8_t)(s->id);
+ GPR_ASSERT(p == GRPC_SLICE_END_PTR(hdr));
+
+ grpc_slice_buffer_add(&t->qbuf, hdr);
+ grpc_slice_buffer_add(&t->qbuf, status_hdr);
+ if (msg != NULL) {
+ grpc_slice_buffer_add(&t->qbuf, message_pfx);
+ grpc_slice_buffer_add(&t->qbuf, grpc_slice_from_copied_string(msg));
+ }
+ grpc_slice_buffer_add(
+ &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
+ &s->stats.outgoing));
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, error);
grpc_chttp2_initiate_write(exec_ctx, t, false, "close_from_api");
@@ -1846,8 +1804,10 @@ static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
if (parse_error == GRPC_ERROR_NONE &&
(parse_error = grpc_http_parser_eof(&parser)) == GRPC_ERROR_NONE) {
error = grpc_error_set_int(
- GRPC_ERROR_CREATE("Trying to connect an http1.x server"),
- GRPC_ERROR_INT_HTTP_STATUS, response.status);
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE("Trying to connect an http1.x server"),
+ GRPC_ERROR_INT_HTTP_STATUS, response.status),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
}
GRPC_ERROR_UNREF(parse_error);
@@ -1948,7 +1908,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (grpc_bdp_estimator_get_estimate(&t->bdp_estimator, &estimate)) {
double target = log2((double)estimate);
double memory_pressure = grpc_resource_quota_get_memory_pressure(
- grpc_resource_user_get_quota(grpc_endpoint_get_resource_user(t->ep)));
+ grpc_resource_user_quota(grpc_endpoint_get_resource_user(t->ep)));
if (memory_pressure > 0.8) {
target *= 1 - GPR_MIN(1, (memory_pressure - 0.8) / 0.1);
}
@@ -2163,6 +2123,8 @@ static void incoming_byte_stream_publish_error(
grpc_closure_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error));
bs->on_next = NULL;
GRPC_ERROR_UNREF(bs->error);
+ grpc_chttp2_cancel_stream(exec_ctx, bs->transport, bs->stream,
+ GRPC_ERROR_REF(error));
bs->error = error;
}
@@ -2271,8 +2233,10 @@ static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_log(GPR_DEBUG, "HTTP2: %s - send goaway to free memory",
t->peer_string);
}
- send_goaway(exec_ctx, t, GRPC_CHTTP2_ENHANCE_YOUR_CALM,
- grpc_slice_from_static_string("Buffers full"));
+ send_goaway(exec_ctx, t,
+ grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
+ GRPC_ERROR_INT_HTTP2_ERROR,
+ GRPC_HTTP2_ENHANCE_YOUR_CALM));
} else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace) {
gpr_log(GPR_DEBUG,
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
@@ -2301,7 +2265,7 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_chttp2_cancel_stream(
exec_ctx, t, s, grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
GRPC_ERROR_INT_HTTP2_ERROR,
- GRPC_CHTTP2_ENHANCE_YOUR_CALM));
+ GRPC_HTTP2_ENHANCE_YOUR_CALM));
if (n > 1) {
/* Since we cancel one stream per destructive reclamation, if
there are more streams left, we can immediately post a new
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
index 20043f5fbf..7d5beed09d 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
@@ -39,8 +39,7 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
-#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
-#include "src/core/ext/transport/chttp2/transport/status_conversion.h"
+#include "src/core/lib/transport/http2_errors.h"
grpc_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
grpc_transport_one_way_stats *stats) {
@@ -109,17 +108,9 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
(((uint32_t)p->reason_bytes[2]) << 8) |
(((uint32_t)p->reason_bytes[3]));
grpc_error *error = GRPC_ERROR_NONE;
- if (reason != GRPC_CHTTP2_NO_ERROR || s->header_frames_received < 2) {
+ if (reason != GRPC_HTTP2_NO_ERROR || s->header_frames_received < 2) {
error = grpc_error_set_int(GRPC_ERROR_CREATE("RST_STREAM"),
GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)reason);
- grpc_status_code status_code = grpc_chttp2_http2_error_to_grpc_status(
- (grpc_chttp2_error_code)reason, s->deadline);
- char *status_details;
- gpr_asprintf(&status_details, "Received RST_STREAM with error code %d",
- reason);
- grpc_slice slice_details = grpc_slice_from_copied_string(status_details);
- gpr_free(status_details);
- grpc_chttp2_fake_status(exec_ctx, t, s, status_code, &slice_details);
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, error);
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.c b/src/core/ext/transport/chttp2/transport/frame_settings.c
index 2acfa2052a..82290e34cd 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.c
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.c
@@ -43,8 +43,8 @@
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/ext/transport/chttp2/transport/frame.h"
-#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/transport/http2_errors.h"
#define MAX_MAX_HEADER_LIST_SIZE (1024 * 1024 * 1024)
@@ -52,21 +52,21 @@
const grpc_chttp2_setting_parameters
grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS] = {
{NULL, 0, 0, 0, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE,
- GRPC_CHTTP2_PROTOCOL_ERROR},
+ GRPC_HTTP2_PROTOCOL_ERROR},
{"HEADER_TABLE_SIZE", 4096, 0, 0xffffffff,
- GRPC_CHTTP2_CLAMP_INVALID_VALUE, GRPC_CHTTP2_PROTOCOL_ERROR},
+ GRPC_CHTTP2_CLAMP_INVALID_VALUE, GRPC_HTTP2_PROTOCOL_ERROR},
{"ENABLE_PUSH", 1, 0, 1, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE,
- GRPC_CHTTP2_PROTOCOL_ERROR},
+ GRPC_HTTP2_PROTOCOL_ERROR},
{"MAX_CONCURRENT_STREAMS", 0xffffffffu, 0, 0xffffffffu,
- GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_CHTTP2_PROTOCOL_ERROR},
+ GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_HTTP2_PROTOCOL_ERROR},
{"INITIAL_WINDOW_SIZE", 65535, 0, 0x7fffffffu,
GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE,
- GRPC_CHTTP2_FLOW_CONTROL_ERROR},
+ GRPC_HTTP2_FLOW_CONTROL_ERROR},
{"MAX_FRAME_SIZE", 16384, 16384, 16777215,
- GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_CHTTP2_PROTOCOL_ERROR},
+ GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_HTTP2_PROTOCOL_ERROR},
{"MAX_HEADER_LIST_SIZE", MAX_MAX_HEADER_LIST_SIZE, 0,
MAX_MAX_HEADER_LIST_SIZE, GRPC_CHTTP2_CLAMP_INVALID_VALUE,
- GRPC_CHTTP2_PROTOCOL_ERROR},
+ GRPC_HTTP2_PROTOCOL_ERROR},
};
static uint8_t *fill_header(uint8_t *out, uint32_t length, uint8_t flags) {
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.c b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
index 49a8326f62..63df8e135f 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
@@ -49,6 +49,7 @@
#include "src/core/ext/transport/chttp2/transport/hpack_table.h"
#include "src/core/ext/transport/chttp2/transport/varint.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/timeout_encoding.h"
@@ -64,6 +65,10 @@
/* don't consider adding anything bigger than this to the hpack table */
#define MAX_DECODER_SPACE_USAGE 512
+static grpc_slice_refcount terminal_slice_refcount = {NULL, NULL};
+static const grpc_slice terminal_slice = {&terminal_slice_refcount,
+ .data.refcounted = {0, 0}};
+
extern int grpc_http_trace;
typedef struct {
@@ -185,9 +190,12 @@ static void evict_entry(grpc_chttp2_hpack_compressor *c) {
/* add an element to the decoder table */
static void add_elem(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
- grpc_mdelem *elem) {
- uint32_t key_hash = elem->key->hash;
- uint32_t elem_hash = GRPC_MDSTR_KV_HASH(key_hash, elem->value->hash);
+ grpc_mdelem elem) {
+ GPR_ASSERT(GRPC_MDELEM_IS_INTERNED(elem));
+
+ uint32_t key_hash = grpc_slice_hash(GRPC_MDKEY(elem));
+ uint32_t value_hash = grpc_slice_hash(GRPC_MDVALUE(elem));
+ uint32_t elem_hash = GRPC_MDSTR_KV_HASH(key_hash, value_hash);
uint32_t new_index = c->tail_remote_index + c->table_elems + 1;
size_t elem_size = grpc_mdelem_get_size_in_hpack_table(elem);
@@ -212,17 +220,18 @@ static void add_elem(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
c->table_elems++;
/* Store this element into {entries,indices}_elem */
- if (c->entries_elems[HASH_FRAGMENT_2(elem_hash)] == elem) {
+ if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)], elem)) {
/* already there: update with new index */
c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
- } else if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == elem) {
+ } else if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)],
+ elem)) {
/* already there (cuckoo): update with new index */
c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
- } else if (c->entries_elems[HASH_FRAGMENT_2(elem_hash)] == NULL) {
+ } else if (GRPC_MDISNULL(c->entries_elems[HASH_FRAGMENT_2(elem_hash)])) {
/* not there, but a free element: add */
c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
- } else if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == NULL) {
+ } else if (GRPC_MDISNULL(c->entries_elems[HASH_FRAGMENT_3(elem_hash)])) {
/* not there (cuckoo), but a free element: add */
c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
@@ -241,24 +250,34 @@ static void add_elem(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
/* do exactly the same for the key (so we can find by that again too) */
- if (c->entries_keys[HASH_FRAGMENT_2(key_hash)] == elem->key) {
+ if (grpc_slice_eq(c->entries_keys[HASH_FRAGMENT_2(key_hash)],
+ GRPC_MDKEY(elem))) {
c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
- } else if (c->entries_keys[HASH_FRAGMENT_3(key_hash)] == elem->key) {
+ } else if (grpc_slice_eq(c->entries_keys[HASH_FRAGMENT_3(key_hash)],
+ GRPC_MDKEY(elem))) {
c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
- } else if (c->entries_keys[HASH_FRAGMENT_2(key_hash)] == NULL) {
- c->entries_keys[HASH_FRAGMENT_2(key_hash)] = GRPC_MDSTR_REF(elem->key);
+ } else if (c->entries_keys[HASH_FRAGMENT_2(key_hash)].refcount ==
+ &terminal_slice_refcount) {
+ c->entries_keys[HASH_FRAGMENT_2(key_hash)] =
+ grpc_slice_ref_internal(GRPC_MDKEY(elem));
c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
- } else if (c->entries_keys[HASH_FRAGMENT_3(key_hash)] == NULL) {
- c->entries_keys[HASH_FRAGMENT_3(key_hash)] = GRPC_MDSTR_REF(elem->key);
+ } else if (c->entries_keys[HASH_FRAGMENT_3(key_hash)].refcount ==
+ &terminal_slice_refcount) {
+ c->entries_keys[HASH_FRAGMENT_3(key_hash)] =
+ grpc_slice_ref_internal(GRPC_MDKEY(elem));
c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
} else if (c->indices_keys[HASH_FRAGMENT_2(key_hash)] <
c->indices_keys[HASH_FRAGMENT_3(key_hash)]) {
- GRPC_MDSTR_UNREF(exec_ctx, c->entries_keys[HASH_FRAGMENT_2(key_hash)]);
- c->entries_keys[HASH_FRAGMENT_2(key_hash)] = GRPC_MDSTR_REF(elem->key);
+ grpc_slice_unref_internal(exec_ctx,
+ c->entries_keys[HASH_FRAGMENT_2(key_hash)]);
+ c->entries_keys[HASH_FRAGMENT_2(key_hash)] =
+ grpc_slice_ref_internal(GRPC_MDKEY(elem));
c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
} else {
- GRPC_MDSTR_UNREF(exec_ctx, c->entries_keys[HASH_FRAGMENT_3(key_hash)]);
- c->entries_keys[HASH_FRAGMENT_3(key_hash)] = GRPC_MDSTR_REF(elem->key);
+ grpc_slice_unref_internal(exec_ctx,
+ c->entries_keys[HASH_FRAGMENT_3(key_hash)]);
+ c->entries_keys[HASH_FRAGMENT_3(key_hash)] =
+ grpc_slice_ref_internal(GRPC_MDKEY(elem));
c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
}
}
@@ -270,20 +289,18 @@ static void emit_indexed(grpc_chttp2_hpack_compressor *c, uint32_t elem_index,
len);
}
-static grpc_slice get_wire_value(grpc_mdelem *elem, uint8_t *huffman_prefix) {
- if (grpc_is_binary_header(
- (const char *)GRPC_SLICE_START_PTR(elem->key->slice),
- GRPC_SLICE_LENGTH(elem->key->slice))) {
+static grpc_slice get_wire_value(grpc_mdelem elem, uint8_t *huffman_prefix) {
+ if (grpc_is_binary_header(GRPC_MDKEY(elem))) {
*huffman_prefix = 0x80;
- return grpc_mdstr_as_base64_encoded_and_huffman_compressed(elem->value);
+ return grpc_chttp2_base64_encode_and_huffman_compress(GRPC_MDVALUE(elem));
}
/* TODO(ctiller): opportunistically compress non-binary headers */
*huffman_prefix = 0x00;
- return elem->value->slice;
+ return grpc_slice_ref_internal(GRPC_MDVALUE(elem));
}
static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor *c,
- uint32_t key_index, grpc_mdelem *elem,
+ uint32_t key_index, grpc_mdelem elem,
framer_state *st) {
uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 2);
uint8_t huffman_prefix;
@@ -296,11 +313,11 @@ static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor *c,
add_tiny_header_data(st, len_pfx), len_pfx);
GRPC_CHTTP2_WRITE_VARINT((uint32_t)len_val, 1, huffman_prefix,
add_tiny_header_data(st, len_val_len), len_val_len);
- add_header_data(st, grpc_slice_ref_internal(value_slice));
+ add_header_data(st, value_slice);
}
static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
- uint32_t key_index, grpc_mdelem *elem,
+ uint32_t key_index, grpc_mdelem elem,
framer_state *st) {
uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 4);
uint8_t huffman_prefix;
@@ -313,12 +330,12 @@ static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
add_tiny_header_data(st, len_pfx), len_pfx);
GRPC_CHTTP2_WRITE_VARINT((uint32_t)len_val, 1, huffman_prefix,
add_tiny_header_data(st, len_val_len), len_val_len);
- add_header_data(st, grpc_slice_ref_internal(value_slice));
+ add_header_data(st, value_slice);
}
static void emit_lithdr_incidx_v(grpc_chttp2_hpack_compressor *c,
- grpc_mdelem *elem, framer_state *st) {
- uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(elem->key->slice);
+ grpc_mdelem elem, framer_state *st) {
+ uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(GRPC_MDKEY(elem));
uint8_t huffman_prefix;
grpc_slice value_slice = get_wire_value(elem, &huffman_prefix);
uint32_t len_val = (uint32_t)GRPC_SLICE_LENGTH(value_slice);
@@ -329,15 +346,15 @@ static void emit_lithdr_incidx_v(grpc_chttp2_hpack_compressor *c,
*add_tiny_header_data(st, 1) = 0x40;
GRPC_CHTTP2_WRITE_VARINT(len_key, 1, 0x00,
add_tiny_header_data(st, len_key_len), len_key_len);
- add_header_data(st, grpc_slice_ref_internal(elem->key->slice));
+ add_header_data(st, grpc_slice_ref_internal(GRPC_MDKEY(elem)));
GRPC_CHTTP2_WRITE_VARINT(len_val, 1, huffman_prefix,
add_tiny_header_data(st, len_val_len), len_val_len);
- add_header_data(st, grpc_slice_ref_internal(value_slice));
+ add_header_data(st, value_slice);
}
static void emit_lithdr_noidx_v(grpc_chttp2_hpack_compressor *c,
- grpc_mdelem *elem, framer_state *st) {
- uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(elem->key->slice);
+ grpc_mdelem elem, framer_state *st) {
+ uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(GRPC_MDKEY(elem));
uint8_t huffman_prefix;
grpc_slice value_slice = get_wire_value(elem, &huffman_prefix);
uint32_t len_val = (uint32_t)GRPC_SLICE_LENGTH(value_slice);
@@ -348,10 +365,10 @@ static void emit_lithdr_noidx_v(grpc_chttp2_hpack_compressor *c,
*add_tiny_header_data(st, 1) = 0x00;
GRPC_CHTTP2_WRITE_VARINT(len_key, 1, 0x00,
add_tiny_header_data(st, len_key_len), len_key_len);
- add_header_data(st, grpc_slice_ref_internal(elem->key->slice));
+ add_header_data(st, grpc_slice_ref_internal(GRPC_MDKEY(elem)));
GRPC_CHTTP2_WRITE_VARINT(len_val, 1, huffman_prefix,
add_tiny_header_data(st, len_val_len), len_val_len);
- add_header_data(st, grpc_slice_ref_internal(value_slice));
+ add_header_data(st, value_slice);
}
static void emit_advertise_table_size_change(grpc_chttp2_hpack_compressor *c,
@@ -369,15 +386,9 @@ static uint32_t dynidx(grpc_chttp2_hpack_compressor *c, uint32_t elem_index) {
/* encode an mdelem */
static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
- grpc_mdelem *elem, framer_state *st) {
- uint32_t key_hash = elem->key->hash;
- uint32_t elem_hash = GRPC_MDSTR_KV_HASH(key_hash, elem->value->hash);
- size_t decoder_space_usage;
- uint32_t indices_key;
- int should_add_elem;
-
- GPR_ASSERT(GRPC_SLICE_LENGTH(elem->key->slice) > 0);
- if (GRPC_SLICE_START_PTR(elem->key->slice)[0] != ':') { /* regular header */
+ grpc_mdelem elem, framer_state *st) {
+ GPR_ASSERT(GRPC_SLICE_LENGTH(GRPC_MDKEY(elem)) > 0);
+ if (GRPC_SLICE_START_PTR(GRPC_MDKEY(elem))[0] != ':') { /* regular header */
st->seen_regular_header = 1;
} else {
GPR_ASSERT(
@@ -385,11 +396,39 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
"Reserved header (colon-prefixed) happening after regular ones.");
}
+ if (grpc_http_trace && !GRPC_MDELEM_IS_INTERNED(elem)) {
+ char *k = grpc_slice_to_c_string(GRPC_MDKEY(elem));
+ char *v = grpc_slice_to_c_string(GRPC_MDVALUE(elem));
+ gpr_log(
+ GPR_DEBUG,
+ "Encode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
+ k, v, GRPC_MDELEM_IS_INTERNED(elem), GRPC_MDELEM_STORAGE(elem),
+ grpc_slice_is_interned(GRPC_MDKEY(elem)),
+ grpc_slice_is_interned(GRPC_MDVALUE(elem)));
+ gpr_free(k);
+ gpr_free(v);
+ }
+ if (!GRPC_MDELEM_IS_INTERNED(elem)) {
+ emit_lithdr_noidx_v(c, elem, st);
+ return;
+ }
+
+ uint32_t key_hash;
+ uint32_t value_hash;
+ uint32_t elem_hash;
+ size_t decoder_space_usage;
+ uint32_t indices_key;
+ int should_add_elem;
+
+ key_hash = grpc_slice_hash(GRPC_MDKEY(elem));
+ value_hash = grpc_slice_hash(GRPC_MDVALUE(elem));
+ elem_hash = GRPC_MDSTR_KV_HASH(key_hash, value_hash);
+
inc_filter(HASH_FRAGMENT_1(elem_hash), &c->filter_elems_sum, c->filter_elems);
/* is this elem currently in the decoders table? */
- if (c->entries_elems[HASH_FRAGMENT_2(elem_hash)] == elem &&
+ if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)], elem) &&
c->indices_elems[HASH_FRAGMENT_2(elem_hash)] > c->tail_remote_index) {
/* HIT: complete element (first cuckoo hash) */
emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]),
@@ -397,7 +436,7 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
return;
}
- if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == elem &&
+ if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)], elem) &&
c->indices_elems[HASH_FRAGMENT_3(elem_hash)] > c->tail_remote_index) {
/* HIT: complete element (second cuckoo hash) */
emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]),
@@ -414,7 +453,8 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
/* no hits for the elem... maybe there's a key? */
indices_key = c->indices_keys[HASH_FRAGMENT_2(key_hash)];
- if (c->entries_keys[HASH_FRAGMENT_2(key_hash)] == elem->key &&
+ if (grpc_slice_eq(c->entries_keys[HASH_FRAGMENT_2(key_hash)],
+ GRPC_MDKEY(elem)) &&
indices_key > c->tail_remote_index) {
/* HIT: key (first cuckoo hash) */
if (should_add_elem) {
@@ -429,7 +469,8 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
}
indices_key = c->indices_keys[HASH_FRAGMENT_3(key_hash)];
- if (c->entries_keys[HASH_FRAGMENT_3(key_hash)] == elem->key &&
+ if (grpc_slice_eq(c->entries_keys[HASH_FRAGMENT_3(key_hash)],
+ GRPC_MDKEY(elem)) &&
indices_key > c->tail_remote_index) {
/* HIT: key (first cuckoo hash) */
if (should_add_elem) {
@@ -463,11 +504,11 @@ static void deadline_enc(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_compressor *c, gpr_timespec deadline,
framer_state *st) {
char timeout_str[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
- grpc_mdelem *mdelem;
+ grpc_mdelem mdelem;
grpc_http2_encode_timeout(
gpr_time_sub(deadline, gpr_now(deadline.clock_type)), timeout_str);
- mdelem = grpc_mdelem_from_metadata_strings(
- exec_ctx, GRPC_MDSTR_GRPC_TIMEOUT, grpc_mdstr_from_string(timeout_str));
+ mdelem = grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_TIMEOUT,
+ grpc_slice_from_copied_string(timeout_str));
hpack_enc(exec_ctx, c, mdelem, st);
GRPC_MDELEM_UNREF(exec_ctx, mdelem);
}
@@ -484,14 +525,19 @@ void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c) {
gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems);
memset(c->table_elem_size, 0,
sizeof(*c->table_elem_size) * c->cap_table_elems);
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(c->entries_keys); i++) {
+ c->entries_keys[i] = terminal_slice;
+ }
}
void grpc_chttp2_hpack_compressor_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_compressor *c) {
int i;
for (i = 0; i < GRPC_CHTTP2_HPACKC_NUM_VALUES; i++) {
- if (c->entries_keys[i]) GRPC_MDSTR_UNREF(exec_ctx, c->entries_keys[i]);
- if (c->entries_elems[i]) GRPC_MDELEM_UNREF(exec_ctx, c->entries_elems[i]);
+ if (c->entries_keys[i].refcount != &terminal_slice_refcount) {
+ grpc_slice_unref_internal(exec_ctx, c->entries_keys[i]);
+ }
+ GRPC_MDELEM_UNREF(exec_ctx, c->entries_elems[i]);
}
gpr_free(c->table_elem_size);
}
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.h b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
index 3a35496ec8..83ba5b1b3e 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
@@ -74,8 +74,8 @@ typedef struct {
/* entry tables for keys & elems: these tables track values that have been
seen and *may* be in the decompressor table */
- grpc_mdstr *entries_keys[GRPC_CHTTP2_HPACKC_NUM_VALUES];
- grpc_mdelem *entries_elems[GRPC_CHTTP2_HPACKC_NUM_VALUES];
+ grpc_slice entries_keys[GRPC_CHTTP2_HPACKC_NUM_VALUES];
+ grpc_mdelem entries_elems[GRPC_CHTTP2_HPACKC_NUM_VALUES];
uint32_t indices_keys[GRPC_CHTTP2_HPACKC_NUM_VALUES];
uint32_t indices_elems[GRPC_CHTTP2_HPACKC_NUM_VALUES];
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c
index 8b91cc760b..40f5120308 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c
@@ -50,9 +50,13 @@
#include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
-#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
+#include "src/core/lib/transport/http2_errors.h"
+
+/* TODO(ctiller): remove before submission */
+#include "src/core/lib/slice/slice_string_helpers.h"
extern int grpc_http_trace;
@@ -668,8 +672,22 @@ static const uint8_t inverse_base64[256] = {
/* emission helpers */
static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p,
- grpc_mdelem *md, int add_to_table) {
+ grpc_mdelem md, int add_to_table) {
+ if (grpc_http_trace && !GRPC_MDELEM_IS_INTERNED(md)) {
+ char *k = grpc_slice_to_c_string(GRPC_MDKEY(md));
+ char *v = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ gpr_log(
+ GPR_DEBUG,
+ "Decode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
+ k, v, GRPC_MDELEM_IS_INTERNED(md), GRPC_MDELEM_STORAGE(md),
+ grpc_slice_is_interned(GRPC_MDKEY(md)),
+ grpc_slice_is_interned(GRPC_MDVALUE(md)));
+ gpr_free(k);
+ gpr_free(v);
+ }
if (add_to_table) {
+ GPR_ASSERT(GRPC_MDELEM_STORAGE(md) == GRPC_MDELEM_STORAGE_INTERNED ||
+ GRPC_MDELEM_STORAGE(md) == GRPC_MDELEM_STORAGE_STATIC);
grpc_error *err = grpc_chttp2_hptbl_add(exec_ctx, &p->table, md);
if (err != GRPC_ERROR_NONE) return err;
}
@@ -681,10 +699,28 @@ static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p,
return GRPC_ERROR_NONE;
}
-static grpc_mdstr *take_string(grpc_chttp2_hpack_parser *p,
- grpc_chttp2_hpack_parser_string *str) {
- grpc_mdstr *s = grpc_mdstr_from_buffer((uint8_t *)str->str, str->length);
- str->length = 0;
+static grpc_slice take_string(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
+ grpc_chttp2_hpack_parser_string *str,
+ bool intern) {
+ grpc_slice s;
+ if (!str->copied) {
+ if (intern) {
+ s = grpc_slice_intern(str->data.referenced);
+ grpc_slice_unref_internal(exec_ctx, str->data.referenced);
+ } else {
+ s = str->data.referenced;
+ }
+ str->copied = true;
+ str->data.referenced = grpc_empty_slice();
+ } else if (intern) {
+ s = grpc_slice_intern(grpc_slice_from_static_buffer(
+ str->data.copied.str, str->data.copied.length));
+ } else {
+ s = grpc_slice_from_copied_buffer(str->data.copied.str,
+ str->data.copied.length);
+ }
+ str->data.copied.length = 0;
return s;
}
@@ -771,8 +807,8 @@ static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
- grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- if (md == NULL) {
+ grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
+ if (GRPC_MDISNULL(md)) {
return grpc_error_set_int(
grpc_error_set_int(GRPC_ERROR_CREATE("Invalid HPACK index received"),
GRPC_ERROR_INT_INDEX, (intptr_t)p->index),
@@ -813,12 +849,13 @@ static grpc_error *finish_lithdr_incidx(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
- grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- GPR_ASSERT(md != NULL); /* handled in string parsing */
- grpc_error *err = on_hdr(exec_ctx, p, grpc_mdelem_from_metadata_strings(
- exec_ctx, GRPC_MDSTR_REF(md->key),
- take_string(p, &p->value)),
- 1);
+ grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
+ GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
+ grpc_error *err = on_hdr(
+ exec_ctx, p,
+ grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
+ take_string(exec_ctx, p, &p->value, true)),
+ 1);
if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
return parse_begin(exec_ctx, p, cur, end);
}
@@ -828,10 +865,11 @@ static grpc_error *finish_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
- grpc_error *err = on_hdr(exec_ctx, p, grpc_mdelem_from_metadata_strings(
- exec_ctx, take_string(p, &p->key),
- take_string(p, &p->value)),
- 1);
+ grpc_error *err = on_hdr(
+ exec_ctx, p,
+ grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
+ take_string(exec_ctx, p, &p->value, true)),
+ 1);
if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
return parse_begin(exec_ctx, p, cur, end);
}
@@ -881,12 +919,13 @@ static grpc_error *finish_lithdr_notidx(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
- grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- GPR_ASSERT(md != NULL); /* handled in string parsing */
- grpc_error *err = on_hdr(exec_ctx, p, grpc_mdelem_from_metadata_strings(
- exec_ctx, GRPC_MDSTR_REF(md->key),
- take_string(p, &p->value)),
- 0);
+ grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
+ GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
+ grpc_error *err = on_hdr(
+ exec_ctx, p,
+ grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
+ take_string(exec_ctx, p, &p->value, false)),
+ 0);
if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
return parse_begin(exec_ctx, p, cur, end);
}
@@ -896,10 +935,11 @@ static grpc_error *finish_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
- grpc_error *err = on_hdr(exec_ctx, p, grpc_mdelem_from_metadata_strings(
- exec_ctx, take_string(p, &p->key),
- take_string(p, &p->value)),
- 0);
+ grpc_error *err = on_hdr(
+ exec_ctx, p,
+ grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
+ take_string(exec_ctx, p, &p->value, false)),
+ 0);
if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
return parse_begin(exec_ctx, p, cur, end);
}
@@ -949,12 +989,13 @@ static grpc_error *finish_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
- grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- GPR_ASSERT(md != NULL); /* handled in string parsing */
- grpc_error *err = on_hdr(exec_ctx, p, grpc_mdelem_from_metadata_strings(
- exec_ctx, GRPC_MDSTR_REF(md->key),
- take_string(p, &p->value)),
- 0);
+ grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
+ GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
+ grpc_error *err = on_hdr(
+ exec_ctx, p,
+ grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
+ take_string(exec_ctx, p, &p->value, false)),
+ 0);
if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
return parse_begin(exec_ctx, p, cur, end);
}
@@ -964,10 +1005,11 @@ static grpc_error *finish_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
- grpc_error *err = on_hdr(exec_ctx, p, grpc_mdelem_from_metadata_strings(
- exec_ctx, take_string(p, &p->key),
- take_string(p, &p->value)),
- 0);
+ grpc_error *err = on_hdr(
+ exec_ctx, p,
+ grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
+ take_string(exec_ctx, p, &p->value, false)),
+ 0);
if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
return parse_begin(exec_ctx, p, cur, end);
}
@@ -1261,14 +1303,15 @@ static grpc_error *parse_string_prefix(grpc_exec_ctx *exec_ctx,
static void append_bytes(grpc_chttp2_hpack_parser_string *str,
const uint8_t *data, size_t length) {
if (length == 0) return;
- if (length + str->length > str->capacity) {
- GPR_ASSERT(str->length + length <= UINT32_MAX);
- str->capacity = (uint32_t)(str->length + length);
- str->str = gpr_realloc(str->str, str->capacity);
+ if (length + str->data.copied.length > str->data.copied.capacity) {
+ GPR_ASSERT(str->data.copied.length + length <= UINT32_MAX);
+ str->data.copied.capacity = (uint32_t)(str->data.copied.length + length);
+ str->data.copied.str =
+ gpr_realloc(str->data.copied.str, str->data.copied.capacity);
}
- memcpy(str->str + str->length, data, length);
- GPR_ASSERT(length <= UINT32_MAX - str->length);
- str->length += (uint32_t)length;
+ memcpy(str->data.copied.str + str->data.copied.length, data, length);
+ GPR_ASSERT(length <= UINT32_MAX - str->data.copied.length);
+ str->data.copied.length += (uint32_t)length;
}
static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
@@ -1351,11 +1394,9 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
exec_ctx, p, cur, end, GRPC_ERROR_CREATE("Should never reach here")));
}
-/* append a null terminator to a string */
static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
- uint8_t terminator = 0;
uint8_t decoded[2];
uint32_t bits;
grpc_chttp2_hpack_parser_string *str = p->parsing.str;
@@ -1396,8 +1437,6 @@ static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
append_bytes(str, decoded, 2);
break;
}
- append_bytes(str, &terminator, 1);
- p->parsing.str->length--; /* don't actually count the null terminator */
return GRPC_ERROR_NONE;
}
@@ -1472,8 +1511,18 @@ static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx,
const uint8_t *cur, const uint8_t *end,
uint8_t binary,
grpc_chttp2_hpack_parser_string *str) {
+ if (!p->huff && binary == NOT_BINARY && (end - cur) >= (intptr_t)p->strlen &&
+ p->current_slice_refcount != NULL) {
+ str->copied = false;
+ str->data.referenced.refcount = p->current_slice_refcount;
+ str->data.referenced.data.refcounted.bytes = (uint8_t *)cur;
+ str->data.referenced.data.refcounted.length = p->strlen;
+ grpc_slice_ref_internal(str->data.referenced);
+ return parse_next(exec_ctx, p, cur + p->strlen, end);
+ }
p->strgot = 0;
- str->length = 0;
+ str->copied = true;
+ str->data.copied.length = 0;
p->parsing.str = str;
p->huff_state = 0;
p->binary = binary;
@@ -1490,21 +1539,22 @@ static grpc_error *parse_key_string(grpc_exec_ctx *exec_ctx,
/* check if a key represents a binary header or not */
static bool is_binary_literal_header(grpc_chttp2_hpack_parser *p) {
- return grpc_is_binary_header(p->key.str, p->key.length);
+ return grpc_is_binary_header(
+ p->key.copied ? grpc_slice_from_static_buffer(p->key.data.copied.str,
+ p->key.data.copied.length)
+ : p->key.data.referenced);
}
static grpc_error *is_binary_indexed_header(grpc_chttp2_hpack_parser *p,
bool *is) {
- grpc_mdelem *elem = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- if (!elem) {
+ grpc_mdelem elem = grpc_chttp2_hptbl_lookup(&p->table, p->index);
+ if (GRPC_MDISNULL(elem)) {
return grpc_error_set_int(
grpc_error_set_int(GRPC_ERROR_CREATE("Invalid HPACK index received"),
GRPC_ERROR_INT_INDEX, (intptr_t)p->index),
GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents);
}
- *is = grpc_is_binary_header(
- (const char *)GRPC_SLICE_START_PTR(elem->key->slice),
- GRPC_SLICE_LENGTH(elem->key->slice));
+ *is = grpc_is_binary_header(GRPC_MDKEY(elem));
return GRPC_ERROR_NONE;
}
@@ -1539,12 +1589,14 @@ void grpc_chttp2_hpack_parser_init(grpc_exec_ctx *exec_ctx,
p->on_header = NULL;
p->on_header_user_data = NULL;
p->state = parse_begin;
- p->key.str = NULL;
- p->key.capacity = 0;
- p->key.length = 0;
- p->value.str = NULL;
- p->value.capacity = 0;
- p->value.length = 0;
+ p->key.data.referenced = grpc_empty_slice();
+ p->key.data.copied.str = NULL;
+ p->key.data.copied.capacity = 0;
+ p->key.data.copied.length = 0;
+ p->value.data.referenced = grpc_empty_slice();
+ p->value.data.copied.str = NULL;
+ p->value.data.copied.capacity = 0;
+ p->value.data.copied.length = 0;
p->dynamic_table_update_allowed = 2;
p->last_error = GRPC_ERROR_NONE;
grpc_chttp2_hptbl_init(exec_ctx, &p->table);
@@ -1559,19 +1611,24 @@ void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p) {
grpc_chttp2_hptbl_destroy(exec_ctx, &p->table);
GRPC_ERROR_UNREF(p->last_error);
- gpr_free(p->key.str);
- gpr_free(p->value.str);
+ grpc_slice_unref_internal(exec_ctx, p->key.data.referenced);
+ grpc_slice_unref_internal(exec_ctx, p->value.data.referenced);
+ gpr_free(p->key.data.copied.str);
+ gpr_free(p->value.data.copied.str);
}
grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
- const uint8_t *beg,
- const uint8_t *end) {
+ grpc_slice slice) {
/* TODO(ctiller): limit the distance of end from beg, and perform multiple
steps in the event of a large chunk of data to limit
stack space usage when no tail call optimization is
available */
- return p->state(exec_ctx, p, beg, end);
+ p->current_slice_refcount = slice.refcount;
+ grpc_error *error = p->state(exec_ctx, p, GRPC_SLICE_START_PTR(slice),
+ GRPC_SLICE_END_PTR(slice));
+ p->current_slice_refcount = NULL;
+ return error;
}
typedef void (*maybe_complete_func_type)(grpc_exec_ctx *exec_ctx,
@@ -1587,7 +1644,7 @@ static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
grpc_chttp2_transport *t = s->t;
if (!s->write_closed) {
grpc_slice_buffer_add(
- &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_CHTTP2_NO_ERROR,
+ &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
&s->stats.outgoing));
grpc_chttp2_initiate_write(exec_ctx, t, false, "force_rst_stream");
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, GRPC_ERROR_NONE);
@@ -1605,8 +1662,7 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
if (s != NULL) {
s->stats.incoming.header_bytes += GRPC_SLICE_LENGTH(slice);
}
- grpc_error *error = grpc_chttp2_hpack_parser_parse(
- exec_ctx, parser, GRPC_SLICE_START_PTR(slice), GRPC_SLICE_END_PTR(slice));
+ grpc_error *error = grpc_chttp2_hpack_parser_parse(exec_ctx, parser, slice);
if (error != GRPC_ERROR_NONE) {
GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return error;
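Editor's sketch (not part of the commit): the parse entry point now takes a whole grpc_slice, so the parser can pin the slice's refcount (current_slice_refcount) and reference literal key/value bytes in place instead of copying them. The helper below is hypothetical; it only assumes the standard grpc_slice_buffer fields (count, slices).

static grpc_error *parse_header_frames(grpc_exec_ctx *exec_ctx,
                                       grpc_chttp2_hpack_parser *p,
                                       grpc_slice_buffer *frames) {
  for (size_t i = 0; i < frames->count; i++) {
    /* the parser holds frames->slices[i].refcount for the duration of the
       call, so plain (non-huffman, non-binary) strings can be referenced
       rather than appended into the copied buffer */
    grpc_error *err =
        grpc_chttp2_hpack_parser_parse(exec_ctx, p, frames->slices[i]);
    if (err != GRPC_ERROR_NONE) return err;
  }
  return GRPC_ERROR_NONE;
}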
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.h b/src/core/ext/transport/chttp2/transport/hpack_parser.h
index 52ccf1e7a7..a817183eb5 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.h
@@ -49,14 +49,20 @@ typedef grpc_error *(*grpc_chttp2_hpack_parser_state)(
const uint8_t *end);
typedef struct {
- char *str;
- uint32_t length;
- uint32_t capacity;
+ bool copied;
+ struct {
+ grpc_slice referenced;
+ struct {
+ char *str;
+ uint32_t length;
+ uint32_t capacity;
+ } copied;
+ } data;
} grpc_chttp2_hpack_parser_string;
struct grpc_chttp2_hpack_parser {
/* user specified callback for each header output */
- void (*on_header)(grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem *md);
+ void (*on_header)(grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem md);
void *on_header_user_data;
grpc_error *last_error;
@@ -67,6 +73,8 @@ struct grpc_chttp2_hpack_parser {
const grpc_chttp2_hpack_parser_state *next_state;
/* what to do after skipping prioritization data */
grpc_chttp2_hpack_parser_state after_prioritization;
+ /* the refcount of the slice that we're currently parsing */
+ grpc_slice_refcount *current_slice_refcount;
/* the value we're currently parsing */
union {
uint32_t *value;
@@ -106,11 +114,9 @@ void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p);
-/* returns 1 on success, 0 on error */
grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
- const uint8_t *beg,
- const uint8_t *end);
+ grpc_slice slice);
/* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
the transport */
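Editor's sketch (not part of the header above): a parser string now carries either a heap-copied buffer or a slice referencing the frame being parsed, selected by the copied flag. A hypothetical accessor shows how the two representations would be read back uniformly; grpc_slice_from_copied_buffer is the existing public slice constructor.

static grpc_slice parser_string_as_slice(grpc_chttp2_hpack_parser_string *str) {
  if (str->copied) {
    /* bytes were accumulated into the gpr_realloc'd copied buffer */
    return grpc_slice_from_copied_buffer(str->data.copied.str,
                                         str->data.copied.length);
  }
  /* bytes still live in the parsed frame; this slice already owns a ref on
     that frame's refcount */
  return str->data.referenced;
}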
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.c b/src/core/ext/transport/chttp2/transport/hpack_table.c
index 26d4036d49..62dd1b8cab 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.c
@@ -190,8 +190,11 @@ void grpc_chttp2_hptbl_init(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
tbl->ents = gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries);
memset(tbl->ents, 0, sizeof(*tbl->ents) * tbl->cap_entries);
for (i = 1; i <= GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
- tbl->static_ents[i - 1] = grpc_mdelem_from_strings(
- exec_ctx, static_table[i].key, static_table[i].value);
+ tbl->static_ents[i - 1] = grpc_mdelem_from_slices(
+ exec_ctx,
+ grpc_slice_intern(grpc_slice_from_static_string(static_table[i].key)),
+ grpc_slice_intern(
+ grpc_slice_from_static_string(static_table[i].value)));
}
}
@@ -208,8 +211,8 @@ void grpc_chttp2_hptbl_destroy(grpc_exec_ctx *exec_ctx,
gpr_free(tbl->ents);
}
-grpc_mdelem *grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
- uint32_t tbl_index) {
+grpc_mdelem grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
+ uint32_t tbl_index) {
/* Static table comes first, just return an entry from it */
if (tbl_index <= GRPC_CHTTP2_LAST_STATIC_ENTRY) {
return tbl->static_ents[tbl_index - 1];
@@ -222,14 +225,14 @@ grpc_mdelem *grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
return tbl->ents[offset];
}
/* Invalid entry: return error */
- return NULL;
+ return GRPC_MDNULL;
}
/* Evict one element from the table */
static void evict1(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
- grpc_mdelem *first_ent = tbl->ents[tbl->first_ent];
- size_t elem_bytes = GRPC_SLICE_LENGTH(first_ent->key->slice) +
- GRPC_SLICE_LENGTH(first_ent->value->slice) +
+ grpc_mdelem first_ent = tbl->ents[tbl->first_ent];
+ size_t elem_bytes = GRPC_SLICE_LENGTH(GRPC_MDKEY(first_ent)) +
+ GRPC_SLICE_LENGTH(GRPC_MDVALUE(first_ent)) +
GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
GPR_ASSERT(elem_bytes <= tbl->mem_used);
tbl->mem_used -= (uint32_t)elem_bytes;
@@ -239,7 +242,7 @@ static void evict1(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
}
static void rebuild_ents(grpc_chttp2_hptbl *tbl, uint32_t new_cap) {
- grpc_mdelem **ents = gpr_malloc(sizeof(*ents) * new_cap);
+ grpc_mdelem *ents = gpr_malloc(sizeof(*ents) * new_cap);
uint32_t i;
for (i = 0; i < tbl->num_ents; i++) {
@@ -301,10 +304,10 @@ grpc_error *grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx *exec_ctx,
}
grpc_error *grpc_chttp2_hptbl_add(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hptbl *tbl, grpc_mdelem *md) {
+ grpc_chttp2_hptbl *tbl, grpc_mdelem md) {
/* determine how many bytes of buffer this entry represents */
- size_t elem_bytes = GRPC_SLICE_LENGTH(md->key->slice) +
- GRPC_SLICE_LENGTH(md->value->slice) +
+ size_t elem_bytes = GRPC_SLICE_LENGTH(GRPC_MDKEY(md)) +
+ GRPC_SLICE_LENGTH(GRPC_MDVALUE(md)) +
GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
if (tbl->current_table_bytes > tbl->max_bytes) {
@@ -352,16 +355,16 @@ grpc_error *grpc_chttp2_hptbl_add(grpc_exec_ctx *exec_ctx,
}
grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find(
- const grpc_chttp2_hptbl *tbl, grpc_mdelem *md) {
+ const grpc_chttp2_hptbl *tbl, grpc_mdelem md) {
grpc_chttp2_hptbl_find_result r = {0, 0};
uint32_t i;
/* See if the string is in the static table */
for (i = 0; i < GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
- grpc_mdelem *ent = tbl->static_ents[i];
- if (md->key != ent->key) continue;
+ grpc_mdelem ent = tbl->static_ents[i];
+ if (!grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDKEY(ent))) continue;
r.index = i + 1u;
- r.has_value = md->value == ent->value;
+ r.has_value = grpc_slice_eq(GRPC_MDVALUE(md), GRPC_MDVALUE(ent));
if (r.has_value) return r;
}
@@ -369,10 +372,10 @@ grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find(
for (i = 0; i < tbl->num_ents; i++) {
uint32_t idx =
(uint32_t)(tbl->num_ents - i + GRPC_CHTTP2_LAST_STATIC_ENTRY);
- grpc_mdelem *ent = tbl->ents[(tbl->first_ent + i) % tbl->cap_entries];
- if (md->key != ent->key) continue;
+ grpc_mdelem ent = tbl->ents[(tbl->first_ent + i) % tbl->cap_entries];
+ if (!grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDKEY(ent))) continue;
r.index = idx;
- r.has_value = md->value == ent->value;
+ r.has_value = grpc_slice_eq(GRPC_MDVALUE(md), GRPC_MDVALUE(ent));
if (r.has_value) return r;
}
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.h b/src/core/ext/transport/chttp2/transport/hpack_table.h
index 144574ef06..32a0380e00 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.h
@@ -79,8 +79,8 @@ typedef struct {
/* a circular buffer of headers - this is stored in the opposite order to
what hpack specifies, in order to simplify table management a little...
meaning lookups need to SUBTRACT from the end position */
- grpc_mdelem **ents;
- grpc_mdelem *static_ents[GRPC_CHTTP2_LAST_STATIC_ENTRY];
+ grpc_mdelem *ents;
+ grpc_mdelem static_ents[GRPC_CHTTP2_LAST_STATIC_ENTRY];
} grpc_chttp2_hptbl;
/* initialize a hpack table */
@@ -94,12 +94,12 @@ grpc_error *grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx *exec_ctx,
uint32_t bytes);
/* lookup a table entry based on its hpack index */
-grpc_mdelem *grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
- uint32_t index);
+grpc_mdelem grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
+ uint32_t index);
/* add a table entry to the index */
grpc_error *grpc_chttp2_hptbl_add(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hptbl *tbl,
- grpc_mdelem *md) GRPC_MUST_USE_RESULT;
+ grpc_mdelem md) GRPC_MUST_USE_RESULT;
/* Find a key/value pair in the table... returns the index in the table of the
most similar entry, or 0 if the value was not found */
typedef struct {
@@ -107,6 +107,6 @@ typedef struct {
int has_value;
} grpc_chttp2_hptbl_find_result;
grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find(
- const grpc_chttp2_hptbl *tbl, grpc_mdelem *md);
+ const grpc_chttp2_hptbl *tbl, grpc_mdelem md);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_TABLE_H */
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.c b/src/core/ext/transport/chttp2/transport/incoming_metadata.c
index 5d1094999c..c91b019aa0 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.c
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.c
@@ -57,7 +57,7 @@ void grpc_chttp2_incoming_metadata_buffer_destroy(
}
void grpc_chttp2_incoming_metadata_buffer_add(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem *elem) {
+ grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem elem) {
GPR_ASSERT(!buffer->published);
if (buffer->capacity == buffer->count) {
buffer->capacity = GPR_MAX(8, 2 * buffer->capacity);
@@ -68,6 +68,19 @@ void grpc_chttp2_incoming_metadata_buffer_add(
buffer->size += GRPC_MDELEM_LENGTH(elem);
}
+void grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
+ grpc_mdelem elem) {
+ for (size_t i = 0; i < buffer->count; i++) {
+ if (grpc_slice_eq(GRPC_MDKEY(buffer->elems[i].md), GRPC_MDKEY(elem))) {
+ GRPC_MDELEM_UNREF(exec_ctx, buffer->elems[i].md);
+ buffer->elems[i].md = elem;
+ return;
+ }
+ }
+ grpc_chttp2_incoming_metadata_buffer_add(buffer, elem);
+}
+
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline) {
GPR_ASSERT(!buffer->published);
@@ -75,21 +88,20 @@ void grpc_chttp2_incoming_metadata_buffer_set_deadline(
}
void grpc_chttp2_incoming_metadata_buffer_publish(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_metadata_batch *batch) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
+ grpc_metadata_batch *batch) {
GPR_ASSERT(!buffer->published);
buffer->published = 1;
if (buffer->count > 0) {
size_t i;
- for (i = 1; i < buffer->count; i++) {
- buffer->elems[i].prev = &buffer->elems[i - 1];
- }
- for (i = 0; i < buffer->count - 1; i++) {
- buffer->elems[i].next = &buffer->elems[i + 1];
+ for (i = 0; i < buffer->count; i++) {
+ /* TODO(ctiller): do something better here */
+ if (!GRPC_LOG_IF_ERROR("grpc_chttp2_incoming_metadata_buffer_publish",
+ grpc_metadata_batch_link_tail(
+ exec_ctx, batch, &buffer->elems[i]))) {
+ GRPC_MDELEM_UNREF(exec_ctx, buffer->elems[i].md);
+ }
}
- buffer->elems[0].prev = NULL;
- buffer->elems[buffer->count - 1].next = NULL;
- batch->list.head = &buffer->elems[0];
- batch->list.tail = &buffer->elems[buffer->count - 1];
} else {
batch->list.head = batch->list.tail = NULL;
}
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.h b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
index 7a0c4da15f..1eac6fc150 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.h
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
@@ -51,10 +51,14 @@ void grpc_chttp2_incoming_metadata_buffer_init(
void grpc_chttp2_incoming_metadata_buffer_destroy(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer);
void grpc_chttp2_incoming_metadata_buffer_publish(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_metadata_batch *batch);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
+ grpc_metadata_batch *batch);
void grpc_chttp2_incoming_metadata_buffer_add(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem *elem);
+ grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem elem);
+void grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
+ grpc_mdelem elem);
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline);
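Editor's sketch (hypothetical call site, not in the commit): replace_or_add keeps at most one element per key in a not-yet-published buffer, so a locally forged header overrides whatever was already buffered instead of appending a duplicate key. GRPC_MDELEM_GRPC_STATUS_1 is assumed here as an illustrative static status element.

static void force_status_cancelled(grpc_exec_ctx *exec_ctx,
                                   grpc_chttp2_incoming_metadata_buffer *buf) {
  /* any grpc_mdelem whose key is grpc-status would do; the existing entry,
     if present, is unref'd and replaced */
  grpc_chttp2_incoming_metadata_buffer_replace_or_add(
      exec_ctx, buf, GRPC_MDELEM_GRPC_STATUS_1);
}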
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index dfcb296ba3..306d63a3d0 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -656,8 +656,7 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
uint32_t stream_id, int64_t val1, int64_t val2);
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_stream *stream,
- grpc_status_code status, grpc_slice *details);
+ grpc_chttp2_stream *stream, grpc_error *error);
void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, int close_reads,
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 4373391e44..80e7dbb96d 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -39,10 +39,11 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
-#include "src/core/ext/transport/chttp2/transport/status_conversion.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
+#include "src/core/lib/transport/http2_errors.h"
#include "src/core/lib/transport/static_metadata.h"
+#include "src/core/lib/transport/status_conversion.h"
#include "src/core/lib/transport/timeout_encoding.h"
static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
@@ -200,7 +201,7 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
return err;
}
if (t->incoming_frame_size == 0) {
- err = parse_frame_slice(exec_ctx, t, gpr_empty_slice(), 1);
+ err = parse_frame_slice(exec_ctx, t, grpc_empty_slice(), 1);
if (err != GRPC_ERROR_NONE) {
return err;
}
@@ -335,7 +336,7 @@ static grpc_error *skip_parser(grpc_exec_ctx *exec_ctx, void *parser,
return GRPC_ERROR_NONE;
}
-static void skip_header(grpc_exec_ctx *exec_ctx, void *tp, grpc_mdelem *md) {
+static void skip_header(grpc_exec_ctx *exec_ctx, void *tp, grpc_mdelem md) {
GRPC_MDELEM_UNREF(exec_ctx, md);
}
@@ -437,7 +438,7 @@ error_handler:
}
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id,
- GRPC_CHTTP2_PROTOCOL_ERROR,
+ GRPC_HTTP2_PROTOCOL_ERROR,
&s->stats.outgoing));
return init_skip_frame_parser(exec_ctx, t, 0);
} else {
@@ -448,7 +449,7 @@ error_handler:
static void free_timeout(void *p) { gpr_free(p); }
static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_mdelem *md) {
+ grpc_mdelem md) {
grpc_chttp2_transport *t = tp;
grpc_chttp2_stream *s = t->incoming_stream;
@@ -456,32 +457,42 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
GPR_ASSERT(s != NULL);
- GRPC_CHTTP2_IF_TRACING(gpr_log(
- GPR_INFO, "HTTP:%d:HDR:%s: %s: %s", s->id, t->is_client ? "CLI" : "SVR",
- grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));
+ if (grpc_http_trace) {
+ char *key = grpc_slice_to_c_string(GRPC_MDKEY(md));
+ char *value =
+ grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_INFO, "HTTP:%d:HDR:%s: %s: %s", s->id,
+ t->is_client ? "CLI" : "SVR", key, value);
+ gpr_free(key);
+ gpr_free(value);
+ }
- if (md->key == GRPC_MDSTR_GRPC_STATUS && md != GRPC_MDELEM_GRPC_STATUS_0) {
+ if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_STATUS) &&
+ !grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_0)) {
/* TODO(ctiller): check for a status like " 0" */
s->seen_error = true;
}
- if (md->key == GRPC_MDSTR_GRPC_TIMEOUT) {
+ if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_TIMEOUT)) {
gpr_timespec *cached_timeout = grpc_mdelem_get_user_data(md, free_timeout);
- if (!cached_timeout) {
+ gpr_timespec timeout;
+ if (cached_timeout == NULL) {
/* not already parsed: parse it now, and store the result away */
cached_timeout = gpr_malloc(sizeof(gpr_timespec));
- if (!grpc_http2_decode_timeout(grpc_mdstr_as_c_string(md->value),
- cached_timeout)) {
- gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'",
- grpc_mdstr_as_c_string(md->value));
+ if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), cached_timeout)) {
+ char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", val);
+ gpr_free(val);
*cached_timeout = gpr_inf_future(GPR_TIMESPAN);
}
- cached_timeout =
- grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
+ timeout = *cached_timeout;
+ grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
+ } else {
+ timeout = *cached_timeout;
}
grpc_chttp2_incoming_metadata_buffer_set_deadline(
&s->metadata_buffer[0],
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), *cached_timeout));
+ gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), timeout));
GRPC_MDELEM_UNREF(exec_ctx, md);
} else {
const size_t new_size = s->metadata_buffer[0].size + GRPC_MDELEM_LENGTH(md);
@@ -510,7 +521,7 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
}
static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_mdelem *md) {
+ grpc_mdelem md) {
grpc_chttp2_transport *t = tp;
grpc_chttp2_stream *s = t->incoming_stream;
@@ -518,11 +529,18 @@ static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
GPR_ASSERT(s != NULL);
- GRPC_CHTTP2_IF_TRACING(gpr_log(
- GPR_INFO, "HTTP:%d:TRL:%s: %s: %s", s->id, t->is_client ? "CLI" : "SVR",
- grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));
+ if (grpc_http_trace) {
+ char *key = grpc_slice_to_c_string(GRPC_MDKEY(md));
+ char *value =
+ grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_INFO, "HTTP:%d:TRL:%s: %s: %s", s->id,
+ t->is_client ? "CLI" : "SVR", key, value);
+ gpr_free(key);
+ gpr_free(value);
+ }
- if (md->key == GRPC_MDSTR_GRPC_STATUS && md != GRPC_MDELEM_GRPC_STATUS_0) {
+ if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_STATUS) &&
+ !grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_0)) {
/* TODO(ctiller): check for a status like " 0" */
s->seen_error = true;
}
@@ -738,14 +756,13 @@ static grpc_error *parse_frame_slice(grpc_exec_ctx *exec_ctx,
if (grpc_http_trace) {
const char *msg = grpc_error_string(err);
gpr_log(GPR_ERROR, "%s", msg);
- grpc_error_free_string(msg);
}
grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
if (s) {
s->forced_close_error = err;
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id,
- GRPC_CHTTP2_PROTOCOL_ERROR,
+ GRPC_HTTP2_PROTOCOL_ERROR,
&s->stats.outgoing));
} else {
GRPC_ERROR_UNREF(err);
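Editor's sketch: the grpc-timeout handling in on_initial_header above caches the parsed gpr_timespec on the (interned) mdelem via user data, so repeated occurrences of the same timeout string skip re-parsing. The hypothetical helper below isolates that pattern and assumes free_timeout from parsing.c is in scope.

static gpr_timespec timeout_from_mdelem(grpc_mdelem md) {
  gpr_timespec timeout;
  gpr_timespec *cached = grpc_mdelem_get_user_data(md, free_timeout);
  if (cached == NULL) {
    cached = gpr_malloc(sizeof(*cached));
    if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), cached)) {
      *cached = gpr_inf_future(GPR_TIMESPAN);
    }
    /* read the value before handing ownership to the mdelem: set_user_data
       may free our allocation if another thread attached one first */
    timeout = *cached;
    grpc_mdelem_set_user_data(md, free_timeout, cached);
  } else {
    timeout = *cached;
  }
  return timeout;
}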
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index 6ef3b147be..30da3d2911 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -37,9 +37,9 @@
#include <grpc/support/log.h>
-#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/transport/http2_errors.h"
static void add_to_write_list(grpc_chttp2_write_cb **list,
grpc_chttp2_write_cb *cb) {
@@ -226,7 +226,7 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
s->sent_trailing_metadata = true;
if (!t->is_client && !s->read_closed) {
grpc_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(
- s->id, GRPC_CHTTP2_NO_ERROR,
+ s->id, GRPC_HTTP2_NO_ERROR,
&s->stats.outgoing));
}
}
@@ -262,7 +262,7 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
if (!t->is_client && !s->read_closed) {
grpc_slice_buffer_add(
&t->outbuf, grpc_chttp2_rst_stream_create(
- s->id, GRPC_CHTTP2_NO_ERROR, &s->stats.outgoing));
+ s->id, GRPC_HTTP2_NO_ERROR, &s->stats.outgoing));
}
now_writing = true;
}
diff --git a/src/core/ext/transport/cronet/client/secure/cronet_channel_create.c b/src/core/ext/transport/cronet/client/secure/cronet_channel_create.c
index df1acddcc0..477cf07f45 100644
--- a/src/core/ext/transport/cronet/client/secure/cronet_channel_create.c
+++ b/src/core/ext/transport/cronet/client/secure/cronet_channel_create.c
@@ -60,7 +60,7 @@ GRPCAPI grpc_channel *grpc_cronet_secure_channel_create(
ct->host = gpr_malloc(strlen(target) + 1);
strcpy(ct->host, target);
gpr_log(GPR_DEBUG,
- "grpc_create_cronet_transport: cronet_engine = %p, target=%s", engine,
+ "grpc_create_cronet_transport: stream_engine = %p, target=%s", engine,
ct->host);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
diff --git a/src/core/ext/transport/cronet/transport/cronet_api_dummy.c b/src/core/ext/transport/cronet/transport/cronet_api_dummy.c
index 687026c9fd..74327a4214 100644
--- a/src/core/ext/transport/cronet/transport/cronet_api_dummy.c
+++ b/src/core/ext/transport/cronet/transport/cronet_api_dummy.c
@@ -38,48 +38,46 @@ library, so we can build it in all environments */
#include <grpc/support/log.h>
-#include "third_party/objective_c/Cronet/cronet_c_for_grpc.h"
+#include "third_party/Cronet/bidirectional_stream_c.h"
#ifdef GRPC_COMPILE_WITH_CRONET
/* link with the real CRONET library in the build system */
#else
/* Dummy implementation of cronet API just to test for build-ability */
-cronet_bidirectional_stream* cronet_bidirectional_stream_create(
- cronet_engine* engine, void* annotation,
- cronet_bidirectional_stream_callback* callback) {
+bidirectional_stream* bidirectional_stream_create(
+ stream_engine* engine, void* annotation,
+ bidirectional_stream_callback* callback) {
GPR_ASSERT(0);
return NULL;
}
-int cronet_bidirectional_stream_destroy(cronet_bidirectional_stream* stream) {
+int bidirectional_stream_destroy(bidirectional_stream* stream) {
GPR_ASSERT(0);
return 0;
}
-int cronet_bidirectional_stream_start(
- cronet_bidirectional_stream* stream, const char* url, int priority,
- const char* method, const cronet_bidirectional_stream_header_array* headers,
- bool end_of_stream) {
+int bidirectional_stream_start(bidirectional_stream* stream, const char* url,
+ int priority, const char* method,
+ const bidirectional_stream_header_array* headers,
+ bool end_of_stream) {
GPR_ASSERT(0);
return 0;
}
-int cronet_bidirectional_stream_read(cronet_bidirectional_stream* stream,
- char* buffer, int capacity) {
+int bidirectional_stream_read(bidirectional_stream* stream, char* buffer,
+ int capacity) {
GPR_ASSERT(0);
return 0;
}
-int cronet_bidirectional_stream_write(cronet_bidirectional_stream* stream,
- const char* buffer, int count,
- bool end_of_stream) {
+int bidirectional_stream_write(bidirectional_stream* stream, const char* buffer,
+ int count, bool end_of_stream) {
GPR_ASSERT(0);
return 0;
}
-int cronet_bidirectional_stream_cancel(cronet_bidirectional_stream* stream) {
+void bidirectional_stream_cancel(bidirectional_stream* stream) {
GPR_ASSERT(0);
- return 0;
}
#endif /* GRPC_COMPILE_WITH_CRONET */
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index 296c406324..2683abf47c 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -44,12 +44,14 @@
#include "src/core/ext/transport/chttp2/transport/incoming_metadata.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/transport_impl.h"
-#include "third_party/objective_c/Cronet/cronet_c_for_grpc.h"
+#include "third_party/Cronet/bidirectional_stream_c.h"
#define GRPC_HEADER_SIZE_IN_BYTES 5
@@ -86,19 +88,18 @@ enum e_op_id {
/* Cronet callbacks. See cronet_c_for_grpc.h for documentation for each. */
-static void on_request_headers_sent(cronet_bidirectional_stream *);
+static void on_request_headers_sent(bidirectional_stream *);
static void on_response_headers_received(
- cronet_bidirectional_stream *,
- const cronet_bidirectional_stream_header_array *, const char *);
-static void on_write_completed(cronet_bidirectional_stream *, const char *);
-static void on_read_completed(cronet_bidirectional_stream *, char *, int);
+ bidirectional_stream *, const bidirectional_stream_header_array *,
+ const char *);
+static void on_write_completed(bidirectional_stream *, const char *);
+static void on_read_completed(bidirectional_stream *, char *, int);
static void on_response_trailers_received(
- cronet_bidirectional_stream *,
- const cronet_bidirectional_stream_header_array *);
-static void on_succeeded(cronet_bidirectional_stream *);
-static void on_failed(cronet_bidirectional_stream *, int);
-static void on_canceled(cronet_bidirectional_stream *);
-static cronet_bidirectional_stream_callback cronet_callbacks = {
+ bidirectional_stream *, const bidirectional_stream_header_array *);
+static void on_succeeded(bidirectional_stream *);
+static void on_failed(bidirectional_stream *, int);
+static void on_canceled(bidirectional_stream *);
+static bidirectional_stream_callback cronet_callbacks = {
on_request_headers_sent,
on_response_headers_received,
on_read_completed,
@@ -111,7 +112,7 @@ static cronet_bidirectional_stream_callback cronet_callbacks = {
/* Cronet transport object */
struct grpc_cronet_transport {
grpc_transport base; /* must be first element in this structure */
- cronet_engine *engine;
+ stream_engine *engine;
char *host;
};
typedef struct grpc_cronet_transport grpc_cronet_transport;
@@ -176,8 +177,8 @@ struct stream_obj {
grpc_transport_stream_op *curr_op;
grpc_cronet_transport curr_ct;
grpc_stream *curr_gs;
- cronet_bidirectional_stream *cbs;
- cronet_bidirectional_stream_header_array header_array;
+ bidirectional_stream *cbs;
+ bidirectional_stream_header_array header_array;
/* Stream level state. Some state will be tracked both at stream and stream_op
* level */
@@ -344,11 +345,11 @@ static void execute_from_storage(stream_obj *s) {
/*
Cronet callback
*/
-static void on_failed(cronet_bidirectional_stream *stream, int net_error) {
+static void on_failed(bidirectional_stream *stream, int net_error) {
CRONET_LOG(GPR_DEBUG, "on_failed(%p, %d)", stream, net_error);
stream_obj *s = (stream_obj *)stream->annotation;
gpr_mu_lock(&s->mu);
- cronet_bidirectional_stream_destroy(s->cbs);
+ bidirectional_stream_destroy(s->cbs);
s->state.state_callback_received[OP_FAILED] = true;
s->cbs = NULL;
if (s->header_array.headers) {
@@ -367,11 +368,11 @@ static void on_failed(cronet_bidirectional_stream *stream, int net_error) {
/*
Cronet callback
*/
-static void on_canceled(cronet_bidirectional_stream *stream) {
+static void on_canceled(bidirectional_stream *stream) {
CRONET_LOG(GPR_DEBUG, "on_canceled(%p)", stream);
stream_obj *s = (stream_obj *)stream->annotation;
gpr_mu_lock(&s->mu);
- cronet_bidirectional_stream_destroy(s->cbs);
+ bidirectional_stream_destroy(s->cbs);
s->state.state_callback_received[OP_CANCELED] = true;
s->cbs = NULL;
if (s->header_array.headers) {
@@ -390,11 +391,11 @@ static void on_canceled(cronet_bidirectional_stream *stream) {
/*
Cronet callback
*/
-static void on_succeeded(cronet_bidirectional_stream *stream) {
+static void on_succeeded(bidirectional_stream *stream) {
CRONET_LOG(GPR_DEBUG, "on_succeeded(%p)", stream);
stream_obj *s = (stream_obj *)stream->annotation;
gpr_mu_lock(&s->mu);
- cronet_bidirectional_stream_destroy(s->cbs);
+ bidirectional_stream_destroy(s->cbs);
s->state.state_callback_received[OP_SUCCEEDED] = true;
s->cbs = NULL;
free_read_buffer(s);
@@ -405,7 +406,7 @@ static void on_succeeded(cronet_bidirectional_stream *stream) {
/*
Cronet callback
*/
-static void on_request_headers_sent(cronet_bidirectional_stream *stream) {
+static void on_request_headers_sent(bidirectional_stream *stream) {
CRONET_LOG(GPR_DEBUG, "W: on_request_headers_sent(%p)", stream);
stream_obj *s = (stream_obj *)stream->annotation;
gpr_mu_lock(&s->mu);
@@ -424,8 +425,8 @@ static void on_request_headers_sent(cronet_bidirectional_stream *stream) {
Cronet callback
*/
static void on_response_headers_received(
- cronet_bidirectional_stream *stream,
- const cronet_bidirectional_stream_header_array *headers,
+ bidirectional_stream *stream,
+ const bidirectional_stream_header_array *headers,
const char *negotiated_protocol) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
CRONET_LOG(GPR_DEBUG, "R: on_response_headers_received(%p, %p, %s)", stream,
@@ -438,9 +439,11 @@ static void on_response_headers_received(
for (size_t i = 0; i < headers->count; i++) {
grpc_chttp2_incoming_metadata_buffer_add(
&s->state.rs.initial_metadata,
- grpc_mdelem_from_metadata_strings(
- &exec_ctx, grpc_mdstr_from_string(headers->headers[i].key),
- grpc_mdstr_from_string(headers->headers[i].value)));
+ grpc_mdelem_from_slices(
+ &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
+ headers->headers[i].key)),
+ grpc_slice_intern(
+ grpc_slice_from_static_string(headers->headers[i].value))));
}
s->state.state_callback_received[OP_RECV_INITIAL_METADATA] = true;
if (!(s->state.state_op_done[OP_CANCEL_ERROR] ||
@@ -451,9 +454,9 @@ static void on_response_headers_received(
s->state.rs.read_buffer = s->state.rs.grpc_header_bytes;
s->state.rs.received_bytes = 0;
s->state.rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
- CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs);
- cronet_bidirectional_stream_read(s->cbs, s->state.rs.read_buffer,
- s->state.rs.remaining_bytes);
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
+ bidirectional_stream_read(s->cbs, s->state.rs.read_buffer,
+ s->state.rs.remaining_bytes);
}
gpr_mu_unlock(&s->mu);
grpc_exec_ctx_finish(&exec_ctx);
@@ -463,8 +466,7 @@ static void on_response_headers_received(
/*
Cronet callback
*/
-static void on_write_completed(cronet_bidirectional_stream *stream,
- const char *data) {
+static void on_write_completed(bidirectional_stream *stream, const char *data) {
stream_obj *s = (stream_obj *)stream->annotation;
CRONET_LOG(GPR_DEBUG, "W: on_write_completed(%p, %s)", stream, data);
gpr_mu_lock(&s->mu);
@@ -480,7 +482,7 @@ static void on_write_completed(cronet_bidirectional_stream *stream,
/*
Cronet callback
*/
-static void on_read_completed(cronet_bidirectional_stream *stream, char *data,
+static void on_read_completed(bidirectional_stream *stream, char *data,
int count) {
stream_obj *s = (stream_obj *)stream->annotation;
CRONET_LOG(GPR_DEBUG, "R: on_read_completed(%p, %p, %d)", stream, data,
@@ -488,16 +490,16 @@ static void on_read_completed(cronet_bidirectional_stream *stream, char *data,
gpr_mu_lock(&s->mu);
s->state.state_callback_received[OP_RECV_MESSAGE] = true;
if (count > 0 && s->state.flush_read) {
- CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs);
- cronet_bidirectional_stream_read(s->cbs, s->state.rs.read_buffer, 4096);
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
+ bidirectional_stream_read(s->cbs, s->state.rs.read_buffer, 4096);
gpr_mu_unlock(&s->mu);
} else if (count > 0) {
s->state.rs.received_bytes += count;
s->state.rs.remaining_bytes -= count;
if (s->state.rs.remaining_bytes > 0) {
- CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs);
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
s->state.state_op_done[OP_READ_REQ_MADE] = true;
- cronet_bidirectional_stream_read(
+ bidirectional_stream_read(
s->cbs, s->state.rs.read_buffer + s->state.rs.received_bytes,
s->state.rs.remaining_bytes);
gpr_mu_unlock(&s->mu);
@@ -520,8 +522,8 @@ static void on_read_completed(cronet_bidirectional_stream *stream, char *data,
Cronet callback
*/
static void on_response_trailers_received(
- cronet_bidirectional_stream *stream,
- const cronet_bidirectional_stream_header_array *trailers) {
+ bidirectional_stream *stream,
+ const bidirectional_stream_header_array *trailers) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
CRONET_LOG(GPR_DEBUG, "R: on_response_trailers_received(%p,%p)", stream,
trailers);
@@ -536,9 +538,11 @@ static void on_response_trailers_received(
trailers->headers[i].value);
grpc_chttp2_incoming_metadata_buffer_add(
&s->state.rs.trailing_metadata,
- grpc_mdelem_from_metadata_strings(
- &exec_ctx, grpc_mdstr_from_string(trailers->headers[i].key),
- grpc_mdstr_from_string(trailers->headers[i].value)));
+ grpc_mdelem_from_slices(
+ &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
+ trailers->headers[i].key)),
+ grpc_slice_intern(
+ grpc_slice_from_static_string(trailers->headers[i].value))));
s->state.rs.trailing_metadata_valid = true;
if (0 == strcmp(trailers->headers[i].key, "grpc-status") &&
0 != strcmp(trailers->headers[i].value, "0")) {
@@ -551,9 +555,9 @@ static void on_response_trailers_received(
if (!s->state.state_op_done[OP_SEND_TRAILING_METADATA] &&
!(s->state.state_op_done[OP_CANCEL_ERROR] ||
s->state.state_callback_received[OP_FAILED])) {
- CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_write (%p, 0)", s->cbs);
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, 0)", s->cbs);
s->state.state_callback_received[OP_SEND_MESSAGE] = false;
- cronet_bidirectional_stream_write(s->cbs, "", 0, true);
+ bidirectional_stream_write(s->cbs, "", 0, true);
s->state.state_op_done[OP_SEND_TRAILING_METADATA] = true;
gpr_mu_unlock(&s->mu);
@@ -594,7 +598,7 @@ static void create_grpc_frame(grpc_slice_buffer *write_slice_buffer,
*/
static void convert_metadata_to_cronet_headers(
grpc_linked_mdelem *head, const char *host, char **pp_url,
- cronet_bidirectional_stream_header **pp_headers, size_t *p_num_headers,
+ bidirectional_stream_header **pp_headers, size_t *p_num_headers,
const char **method) {
grpc_linked_mdelem *curr = head;
/* Walk the linked list and get number of header fields */
@@ -605,9 +609,9 @@ static void convert_metadata_to_cronet_headers(
}
/* Allocate enough memory. It is freed in the on_request_headers_sent callback
*/
- cronet_bidirectional_stream_header *headers =
- (cronet_bidirectional_stream_header *)gpr_malloc(
- sizeof(cronet_bidirectional_stream_header) * num_headers_available);
+ bidirectional_stream_header *headers =
+ (bidirectional_stream_header *)gpr_malloc(
+ sizeof(bidirectional_stream_header) * num_headers_available);
*pp_headers = headers;
/* Walk the linked list again, this time copying the header fields.
@@ -618,33 +622,41 @@ static void convert_metadata_to_cronet_headers(
curr = head;
size_t num_headers = 0;
while (num_headers < num_headers_available) {
- grpc_mdelem *mdelem = curr->md;
+ grpc_mdelem mdelem = curr->md;
curr = curr->next;
- const char *key = grpc_mdstr_as_c_string(mdelem->key);
- const char *value = grpc_mdstr_as_c_string(mdelem->value);
- if (mdelem->key == GRPC_MDSTR_SCHEME ||
- mdelem->key == GRPC_MDSTR_AUTHORITY) {
+ char *key = grpc_slice_to_c_string(GRPC_MDKEY(mdelem));
+ char *value = grpc_slice_to_c_string(GRPC_MDVALUE(mdelem));
+ if (grpc_slice_eq(GRPC_MDKEY(mdelem), GRPC_MDSTR_SCHEME) ||
+ grpc_slice_eq(GRPC_MDKEY(mdelem), GRPC_MDSTR_AUTHORITY)) {
/* Cronet populates these fields on its own */
+ gpr_free(key);
+ gpr_free(value);
continue;
}
- if (mdelem->key == GRPC_MDSTR_METHOD) {
- if (mdelem->value == GRPC_MDSTR_PUT) {
+ if (grpc_slice_eq(GRPC_MDKEY(mdelem), GRPC_MDSTR_METHOD)) {
+ if (grpc_slice_eq(GRPC_MDVALUE(mdelem), GRPC_MDSTR_PUT)) {
*method = "PUT";
} else {
/* POST method in default*/
*method = "POST";
}
+ gpr_free(key);
+ gpr_free(value);
continue;
}
- if (mdelem->key == GRPC_MDSTR_PATH) {
+ if (grpc_slice_eq(GRPC_MDKEY(mdelem), GRPC_MDSTR_PATH)) {
/* Create URL by appending :path value to the hostname */
gpr_asprintf(pp_url, "https://%s%s", host, value);
+ gpr_free(key);
+ gpr_free(value);
continue;
}
CRONET_LOG(GPR_DEBUG, "header %s = %s", key, value);
headers[num_headers].key = key;
headers[num_headers].value = value;
num_headers++;
+ gpr_free(key);
+ gpr_free(value);
if (curr == NULL) {
break;
}
@@ -664,7 +676,7 @@ static int parse_grpc_header(const uint8_t *data) {
static bool header_has_authority(grpc_linked_mdelem *head) {
while (head != NULL) {
- if (head->md->key == GRPC_MDSTR_AUTHORITY) {
+ if (grpc_slice_eq(GRPC_MDKEY(head->md), GRPC_MDSTR_AUTHORITY)) {
return true;
}
head = head->next;
@@ -833,9 +845,9 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
* on_failed */
GPR_ASSERT(s->cbs == NULL);
GPR_ASSERT(!stream_state->state_op_done[OP_SEND_INITIAL_METADATA]);
- s->cbs = cronet_bidirectional_stream_create(s->curr_ct.engine, s->curr_gs,
- &cronet_callbacks);
- CRONET_LOG(GPR_DEBUG, "%p = cronet_bidirectional_stream_create()", s->cbs);
+ s->cbs = bidirectional_stream_create(s->curr_ct.engine, s->curr_gs,
+ &cronet_callbacks);
+ CRONET_LOG(GPR_DEBUG, "%p = bidirectional_stream_create()", s->cbs);
char *url = NULL;
const char *method = "POST";
s->header_array.headers = NULL;
@@ -843,10 +855,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_op->send_initial_metadata->list.head, s->curr_ct.host, &url,
&s->header_array.headers, &s->header_array.count, &method);
s->header_array.capacity = s->header_array.count;
- CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_start(%p, %s)", s->cbs,
- url);
- cronet_bidirectional_stream_start(s->cbs, url, 0, method, &s->header_array,
- false);
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_start(%p, %s)", s->cbs, url);
+ bidirectional_stream_start(s->cbs, url, 0, method, &s->header_array, false);
stream_state->state_op_done[OP_SEND_INITIAL_METADATA] = true;
result = ACTION_TAKEN_WITH_CALLBACK;
} else if (stream_op->recv_initial_metadata &&
@@ -862,7 +872,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."));
} else {
grpc_chttp2_incoming_metadata_buffer_publish(
- &oas->s->state.rs.initial_metadata, stream_op->recv_initial_metadata);
+ exec_ctx, &oas->s->state.rs.initial_metadata,
+ stream_op->recv_initial_metadata);
grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
GRPC_ERROR_NONE);
}
@@ -897,11 +908,11 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
size_t write_buffer_size;
create_grpc_frame(&write_slice_buffer, &stream_state->ws.write_buffer,
&write_buffer_size);
- CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_write (%p, %p)",
- s->cbs, stream_state->ws.write_buffer);
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, %p)", s->cbs,
+ stream_state->ws.write_buffer);
stream_state->state_callback_received[OP_SEND_MESSAGE] = false;
- cronet_bidirectional_stream_write(s->cbs, stream_state->ws.write_buffer,
- (int)write_buffer_size, false);
+ bidirectional_stream_write(s->cbs, stream_state->ws.write_buffer,
+ (int)write_buffer_size, false);
result = ACTION_TAKEN_WITH_CALLBACK;
} else {
result = NO_ACTION_POSSIBLE;
@@ -949,11 +960,11 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(stream_state->rs.read_buffer);
stream_state->rs.remaining_bytes = stream_state->rs.length_field;
stream_state->rs.received_bytes = 0;
- CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs);
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
stream_state->state_op_done[OP_READ_REQ_MADE] =
true; /* Indicates that at least one read request has been made */
- cronet_bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
- stream_state->rs.remaining_bytes);
+ bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
+ stream_state->rs.remaining_bytes);
result = ACTION_TAKEN_WITH_CALLBACK;
} else {
stream_state->rs.remaining_bytes = 0;
@@ -974,11 +985,11 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->rs.read_buffer = stream_state->rs.grpc_header_bytes;
stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
stream_state->rs.received_bytes = 0;
- CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs);
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
stream_state->state_op_done[OP_READ_REQ_MADE] =
true; /* Indicates that at least one read request has been made */
- cronet_bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
- stream_state->rs.remaining_bytes);
+ bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
+ stream_state->rs.remaining_bytes);
result = ACTION_TAKEN_WITH_CALLBACK;
} else {
result = NO_ACTION_POSSIBLE;
@@ -1008,9 +1019,9 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->rs.received_bytes = 0;
stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
stream_state->rs.length_field_received = false;
- CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs);
- cronet_bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
- stream_state->rs.remaining_bytes);
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
+ bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
+ stream_state->rs.remaining_bytes);
result = ACTION_TAKEN_NO_CALLBACK;
}
} else if (stream_op->recv_trailing_metadata &&
@@ -1019,7 +1030,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_TRAILING_METADATA", oas);
if (oas->s->state.rs.trailing_metadata_valid) {
grpc_chttp2_incoming_metadata_buffer_publish(
- &oas->s->state.rs.trailing_metadata,
+ exec_ctx, &oas->s->state.rs.trailing_metadata,
stream_op->recv_trailing_metadata);
stream_state->rs.trailing_metadata_valid = false;
}
@@ -1033,10 +1044,9 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
result = NO_ACTION_POSSIBLE;
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed.");
} else {
- CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_write (%p, 0)",
- s->cbs);
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, 0)", s->cbs);
stream_state->state_callback_received[OP_SEND_MESSAGE] = false;
- cronet_bidirectional_stream_write(s->cbs, "", 0, true);
+ bidirectional_stream_write(s->cbs, "", 0, true);
result = ACTION_TAKEN_WITH_CALLBACK;
}
stream_state->state_op_done[OP_SEND_TRAILING_METADATA] = true;
@@ -1044,9 +1054,9 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
op_can_be_run(stream_op, stream_state, &oas->state,
OP_CANCEL_ERROR)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_CANCEL_ERROR", oas);
- CRONET_LOG(GPR_DEBUG, "W: cronet_bidirectional_stream_cancel(%p)", s->cbs);
+ CRONET_LOG(GPR_DEBUG, "W: bidirectional_stream_cancel(%p)", s->cbs);
if (s->cbs) {
- cronet_bidirectional_stream_cancel(s->cbs);
+ bidirectional_stream_cancel(s->cbs);
result = ACTION_TAKEN_WITH_CALLBACK;
} else {
result = ACTION_TAKEN_NO_CALLBACK;
@@ -1143,18 +1153,17 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
header_has_authority(op->send_initial_metadata->list.head)) {
/* Cronet does not support :authority header field. We cancel the call when
this field is present in metadata */
- cronet_bidirectional_stream_header_array header_array;
- cronet_bidirectional_stream_header *header;
- cronet_bidirectional_stream cbs;
+ bidirectional_stream_header_array header_array;
+ bidirectional_stream_header *header;
+ bidirectional_stream cbs;
CRONET_LOG(GPR_DEBUG,
":authority header is provided but not supported;"
" cancel operations");
/* Notify application that operation is cancelled by forging trailers */
header_array.count = 1;
header_array.capacity = 1;
- header_array.headers =
- gpr_malloc(sizeof(cronet_bidirectional_stream_header));
- header = (cronet_bidirectional_stream_header *)header_array.headers;
+ header_array.headers = gpr_malloc(sizeof(bidirectional_stream_header));
+ header = (bidirectional_stream_header *)header_array.headers;
header->key = "grpc-status";
header->value = "1"; /* Return status GRPC_STATUS_CANCELLED */
cbs.annotation = (void *)s;
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index 8f08b427fb..ec973d4e7f 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -170,7 +170,7 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
- grpc_mdstr *path, gpr_timespec start_time, gpr_timespec deadline,
+ grpc_slice path, gpr_timespec start_time, gpr_timespec deadline,
grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
grpc_call_element_args args;
@@ -288,41 +288,10 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
sizeof(grpc_call_stack)));
}
-static void destroy_op(grpc_exec_ctx *exec_ctx, void *op, grpc_error *error) {
- gpr_free(op);
-}
-
-void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
- memset(op, 0, sizeof(*op));
- op->cancel_error = GRPC_ERROR_CANCELLED;
- op->on_complete =
- grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx);
- elem->filter->start_transport_stream_op(exec_ctx, elem, op);
-}
-
-void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_status_code status,
- grpc_slice *optional_message) {
- grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
- memset(op, 0, sizeof(*op));
- op->on_complete =
- grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx);
- grpc_transport_stream_op_add_cancellation_with_message(exec_ctx, op, status,
- optional_message);
- elem->filter->start_transport_stream_op(exec_ctx, elem, op);
-}
-
-void grpc_call_element_send_close_with_message(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_status_code status,
- grpc_slice *optional_message) {
- grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
- memset(op, 0, sizeof(*op));
- op->on_complete =
- grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx);
- grpc_transport_stream_op_add_close(exec_ctx, op, status, optional_message);
+void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_error *error) {
+ grpc_transport_stream_op *op = grpc_make_transport_stream_op(NULL);
+ op->cancel_error = error;
elem->filter->start_transport_stream_op(exec_ctx, elem, op);
}
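Editor's sketch (illustrative, not part of the commit): the three cancel/close helpers removed above collapse into grpc_call_element_signal_error, with the status code and message carried on a grpc_error. This is what a former send_cancel_with_message call site would look like; the status and message strings here are examples only (the deadline filter below does the same for DEADLINE_EXCEEDED).

static void cancel_too_large(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
  grpc_call_element_signal_error(
      exec_ctx, elem,
      grpc_error_set_int(GRPC_ERROR_CREATE("Message too large"),
                         GRPC_ERROR_INT_GRPC_STATUS,
                         GRPC_STATUS_RESOURCE_EXHAUSTED));
}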
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index d9d3a85233..1cf07d43c2 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -81,7 +81,7 @@ typedef struct {
grpc_call_stack *call_stack;
const void *server_transport_data;
grpc_call_context_element *context;
- grpc_mdstr *path;
+ grpc_slice path;
gpr_timespec start_time;
gpr_timespec deadline;
} grpc_call_element_args;
@@ -238,7 +238,7 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
- grpc_mdstr *path, gpr_timespec start_time, gpr_timespec deadline,
+ grpc_slice path, gpr_timespec start_time, gpr_timespec deadline,
grpc_call_stack *call_stack);
/* Set a pollset or a pollset_set for a call stack: must occur before the first
* op is started */
@@ -299,18 +299,9 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem);
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
grpc_call_element *elem, grpc_transport_stream_op *op);
-void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
- grpc_call_element *cur_elem);
-
-void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx,
- grpc_call_element *cur_elem,
- grpc_status_code status,
- grpc_slice *optional_message);
-
-void grpc_call_element_send_close_with_message(grpc_exec_ctx *exec_ctx,
- grpc_call_element *cur_elem,
- grpc_status_code status,
- grpc_slice *optional_message);
+void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *cur_elem,
+ grpc_error *error);
extern int grpc_trace_channel;
diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c
index 337c194b79..c860d60d88 100644
--- a/src/core/lib/channel/compress_filter.c
+++ b/src/core/lib/channel/compress_filter.c
@@ -45,6 +45,7 @@
#include "src/core/lib/compression/message_compress.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/static_metadata.h"
@@ -80,39 +81,6 @@ typedef struct channel_data {
uint32_t supported_compression_algorithms;
} channel_data;
-/** For each \a md element from the incoming metadata, filter out the entry for
- * "grpc-encoding", using its value to populate the call data's
- * compression_algorithm field. */
-static grpc_mdelem *compression_md_filter(grpc_exec_ctx *exec_ctx,
- void *user_data, grpc_mdelem *md) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
-
- if (md->key == GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST) {
- const char *md_c_str = grpc_mdstr_as_c_string(md->value);
- if (!grpc_compression_algorithm_parse(md_c_str, strlen(md_c_str),
- &calld->compression_algorithm)) {
- gpr_log(GPR_ERROR,
- "Invalid compression algorithm: '%s' (unknown). Ignoring.",
- md_c_str);
- calld->compression_algorithm = GRPC_COMPRESS_NONE;
- }
- if (!GPR_BITGET(channeld->enabled_algorithms_bitset,
- calld->compression_algorithm)) {
- gpr_log(GPR_ERROR,
- "Invalid compression algorithm: '%s' (previously disabled). "
- "Ignoring.",
- md_c_str);
- calld->compression_algorithm = GRPC_COMPRESS_NONE;
- }
- calld->has_compression_algorithm = 1;
- return NULL;
- }
-
- return md;
-}
-
static int skip_compression(grpc_call_element *elem, uint32_t flags) {
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
@@ -131,32 +99,65 @@ static int skip_compression(grpc_call_element *elem, uint32_t flags) {
}
/** Filter initial metadata */
-static void process_send_initial_metadata(
+static grpc_error *process_send_initial_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_metadata_batch *initial_metadata) GRPC_MUST_USE_RESULT;
+static grpc_error *process_send_initial_metadata(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *initial_metadata) {
+ grpc_error *error;
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
/* Parse incoming request for compression. If any, it'll be available
* at calld->compression_algorithm */
- grpc_metadata_batch_filter(exec_ctx, initial_metadata, compression_md_filter,
- elem);
- if (!calld->has_compression_algorithm) {
+ if (initial_metadata->idx.named.grpc_internal_encoding_request != NULL) {
+ grpc_mdelem md =
+ initial_metadata->idx.named.grpc_internal_encoding_request->md;
+ if (!grpc_compression_algorithm_parse(GRPC_MDVALUE(md),
+ &calld->compression_algorithm)) {
+ char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ gpr_log(GPR_ERROR,
+ "Invalid compression algorithm: '%s' (unknown). Ignoring.", val);
+ gpr_free(val);
+ calld->compression_algorithm = GRPC_COMPRESS_NONE;
+ }
+ if (!GPR_BITGET(channeld->enabled_algorithms_bitset,
+ calld->compression_algorithm)) {
+ char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ gpr_log(GPR_ERROR,
+ "Invalid compression algorithm: '%s' (previously disabled). "
+ "Ignoring.",
+ val);
+ gpr_free(val);
+ calld->compression_algorithm = GRPC_COMPRESS_NONE;
+ }
+ calld->has_compression_algorithm = 1;
+
+ grpc_metadata_batch_remove(
+ exec_ctx, initial_metadata,
+ initial_metadata->idx.named.grpc_internal_encoding_request);
+ } else {
/* If no algorithm was found in the metadata and we aren't
* exceptionally skipping compression, fall back to the channel
* default */
calld->compression_algorithm = channeld->default_compression_algorithm;
calld->has_compression_algorithm = 1; /* GPR_TRUE */
}
+
/* hint compression algorithm */
- grpc_metadata_batch_add_tail(
- initial_metadata, &calld->compression_algorithm_storage,
+ error = grpc_metadata_batch_add_tail(
+ exec_ctx, initial_metadata, &calld->compression_algorithm_storage,
grpc_compression_encoding_mdelem(calld->compression_algorithm));
+ if (error != GRPC_ERROR_NONE) return error;
+
/* convey supported compression algorithms */
- grpc_metadata_batch_add_tail(initial_metadata,
- &calld->accept_encoding_storage,
- GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(
- channeld->supported_compression_algorithms));
+ error = grpc_metadata_batch_add_tail(
+ exec_ctx, initial_metadata, &calld->accept_encoding_storage,
+ GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(
+ channeld->supported_compression_algorithms));
+
+ return error;
}
static void continue_send_message(grpc_exec_ctx *exec_ctx,
@@ -247,7 +248,12 @@ static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
GPR_TIMER_BEGIN("compress_start_transport_stream_op", 0);
if (op->send_initial_metadata) {
- process_send_initial_metadata(exec_ctx, elem, op->send_initial_metadata);
+ grpc_error *error = process_send_initial_metadata(
+ exec_ctx, elem, op->send_initial_metadata);
+ if (error != GRPC_ERROR_NONE) {
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
+ return;
+ }
}
if (op->send_message != NULL &&
!skip_compression(elem, op->send_message->flags)) {
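The compress filter hunks above illustrate the central pattern of this change: instead of installing a per-element grpc_metadata_batch_filter callback, a filter now inspects the batch's named-callout index directly and removes the headers it consumes. A minimal sketch of that lookup, condensed from the hunk above (the helper name read_encoding_request is invented for illustration, and the sketch assumes the includes already present in compress_filter.c):

/* Sketch: read and consume the grpc-internal-encoding-request header via the
   batch's callout index rather than a filter callback. */
static grpc_compression_algorithm read_encoding_request(
    grpc_exec_ctx *exec_ctx, grpc_metadata_batch *b) {
  grpc_compression_algorithm algorithm = GRPC_COMPRESS_NONE;
  if (b->idx.named.grpc_internal_encoding_request != NULL) {
    grpc_mdelem md = b->idx.named.grpc_internal_encoding_request->md;
    if (!grpc_compression_algorithm_parse(GRPC_MDVALUE(md), &algorithm)) {
      char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
      gpr_log(GPR_ERROR, "Invalid compression algorithm: '%s'. Ignoring.", val);
      gpr_free(val);
      algorithm = GRPC_COMPRESS_NONE;
    }
    /* this filter owns the header: drop it so it never reaches the wire */
    grpc_metadata_batch_remove(
        exec_ctx, b, b->idx.named.grpc_internal_encoding_request);
  }
  return algorithm;
}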
diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c
index a45a4d4b82..bc9a2effc2 100644
--- a/src/core/lib/channel/deadline_filter.c
+++ b/src/core/lib/channel/deadline_filter.c
@@ -56,10 +56,11 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
deadline_state->timer_pending = false;
gpr_mu_unlock(&deadline_state->timer_mu);
if (error != GRPC_ERROR_CANCELLED) {
- grpc_slice msg = grpc_slice_from_static_string("Deadline Exceeded");
- grpc_call_element_send_cancel_with_message(
- exec_ctx, elem, GRPC_STATUS_DEADLINE_EXCEEDED, &msg);
- grpc_slice_unref_internal(exec_ctx, msg);
+ grpc_call_element_signal_error(
+ exec_ctx, elem,
+ grpc_error_set_int(GRPC_ERROR_CREATE("Deadline Exceeded"),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_DEADLINE_EXCEEDED));
}
GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
}
@@ -196,8 +197,7 @@ void grpc_deadline_state_client_start_transport_stream_op(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op* op) {
grpc_deadline_state* deadline_state = elem->call_data;
- if (op->cancel_error != GRPC_ERROR_NONE ||
- op->close_error != GRPC_ERROR_NONE) {
+ if (op->cancel_error != GRPC_ERROR_NONE) {
cancel_timer_if_needed(exec_ctx, deadline_state);
} else {
// Make sure we know when the call is complete, so that we can cancel
@@ -285,8 +285,7 @@ static void server_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_transport_stream_op* op) {
server_call_data* calld = elem->call_data;
- if (op->cancel_error != GRPC_ERROR_NONE ||
- op->close_error != GRPC_ERROR_NONE) {
+ if (op->cancel_error != GRPC_ERROR_NONE) {
cancel_timer_if_needed(exec_ctx, &calld->base.deadline_state);
} else {
// If we're receiving initial metadata, we need to get the deadline
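The deadline filter now expresses a missed deadline as a single grpc_error carrying the status code as an integer attribute, replacing the old cancel-with-message pair of a status enum and a message slice. A minimal sketch of that construction (the variable name deadline_error is illustrative; as in the hunk above, the caller does not unref the error after handing it to grpc_call_element_signal_error):

/* Sketch: attach the gRPC status to the error itself, then signal it. */
grpc_error *deadline_error =
    grpc_error_set_int(GRPC_ERROR_CREATE("Deadline Exceeded"),
                       GRPC_ERROR_INT_GRPC_STATUS,
                       GRPC_STATUS_DEADLINE_EXCEEDED);
grpc_call_element_signal_error(exec_ctx, elem, deadline_error);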
diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c
index d154450988..49a2a980e0 100644
--- a/src/core/lib/channel/http_client_filter.c
+++ b/src/core/lib/channel/http_client_filter.c
@@ -38,6 +38,7 @@
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/percent_encoding.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/transport_impl.h"
@@ -88,77 +89,104 @@ typedef struct call_data {
} call_data;
typedef struct channel_data {
- grpc_mdelem *static_scheme;
- grpc_mdelem *user_agent;
+ grpc_mdelem static_scheme;
+ grpc_mdelem user_agent;
size_t max_payload_size_for_get;
} channel_data;
-static grpc_mdelem *client_recv_filter(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_mdelem *md) {
- grpc_call_element *elem = user_data;
- if (md == GRPC_MDELEM_STATUS_200) {
- return NULL;
- } else if (md->key == GRPC_MDSTR_STATUS) {
- char *message_string;
- gpr_asprintf(&message_string, "Received http2 header with status: %s",
- grpc_mdstr_as_c_string(md->value));
- grpc_slice message = grpc_slice_from_copied_string(message_string);
- gpr_free(message_string);
- grpc_call_element_send_close_with_message(exec_ctx, elem,
- GRPC_STATUS_CANCELLED, &message);
- return NULL;
- } else if (md->key == GRPC_MDSTR_GRPC_MESSAGE) {
- grpc_slice pct_decoded_msg =
- grpc_permissive_percent_decode_slice(md->value->slice);
- if (grpc_slice_is_equivalent(pct_decoded_msg, md->value->slice)) {
- grpc_slice_unref_internal(exec_ctx, pct_decoded_msg);
- return md;
+static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_metadata_batch *b) {
+ if (b->idx.named.status != NULL) {
+ if (grpc_mdelem_eq(b->idx.named.status->md, GRPC_MDELEM_STATUS_200)) {
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.status);
} else {
- return grpc_mdelem_from_metadata_strings(
- exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
- grpc_mdstr_from_slice(exec_ctx, pct_decoded_msg));
+ char *val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.status->md),
+ GPR_DUMP_ASCII);
+ char *msg;
+ gpr_asprintf(&msg, "Received http2 header with status: %s", val);
+ grpc_error *e = grpc_error_set_str(
+ grpc_error_set_int(
+ grpc_error_set_str(
+ GRPC_ERROR_CREATE(
+ "Received http2 :status header with non-200 OK status"),
+ GRPC_ERROR_STR_VALUE, val),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_CANCELLED),
+ GRPC_ERROR_STR_GRPC_MESSAGE, msg);
+ gpr_free(val);
+ gpr_free(msg);
+ return e;
}
- } else if (md == GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC) {
- return NULL;
- } else if (md->key == GRPC_MDSTR_CONTENT_TYPE) {
- const char *value_str = grpc_mdstr_as_c_string(md->value);
- if (strncmp(value_str, EXPECTED_CONTENT_TYPE,
- EXPECTED_CONTENT_TYPE_LENGTH) == 0 &&
- (value_str[EXPECTED_CONTENT_TYPE_LENGTH] == '+' ||
- value_str[EXPECTED_CONTENT_TYPE_LENGTH] == ';')) {
- /* Although the C implementation doesn't (currently) generate them,
- any custom +-suffix is explicitly valid. */
- /* TODO(klempner): We should consider preallocating common values such
- as +proto or +json, or at least stashing them if we see them. */
- /* TODO(klempner): Should we be surfacing this to application code? */
+ }
+
+ if (b->idx.named.grpc_message != NULL) {
+ grpc_slice pct_decoded_msg = grpc_permissive_percent_decode_slice(
+ GRPC_MDVALUE(b->idx.named.grpc_message->md));
+ if (grpc_slice_is_equivalent(pct_decoded_msg,
+ GRPC_MDVALUE(b->idx.named.grpc_message->md))) {
+ grpc_slice_unref_internal(exec_ctx, pct_decoded_msg);
} else {
- /* TODO(klempner): We're currently allowing this, but we shouldn't
- see it without a proxy so log for now. */
- gpr_log(GPR_INFO, "Unexpected content-type '%s'", value_str);
+ grpc_metadata_batch_set_value(exec_ctx, b->idx.named.grpc_message,
+ pct_decoded_msg);
}
- return NULL;
}
- return md;
+
+ if (b->idx.named.content_type != NULL) {
+ if (!grpc_mdelem_eq(b->idx.named.content_type->md,
+ GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC)) {
+ if (grpc_slice_buf_start_eq(GRPC_MDVALUE(b->idx.named.content_type->md),
+ EXPECTED_CONTENT_TYPE,
+ EXPECTED_CONTENT_TYPE_LENGTH) &&
+ (GRPC_SLICE_START_PTR(GRPC_MDVALUE(
+ b->idx.named.content_type->md))[EXPECTED_CONTENT_TYPE_LENGTH] ==
+ '+' ||
+ GRPC_SLICE_START_PTR(GRPC_MDVALUE(
+ b->idx.named.content_type->md))[EXPECTED_CONTENT_TYPE_LENGTH] ==
+ ';')) {
+ /* Although the C implementation doesn't (currently) generate them,
+ any custom +-suffix is explicitly valid. */
+ /* TODO(klempner): We should consider preallocating common values such
+ as +proto or +json, or at least stashing them if we see them. */
+ /* TODO(klempner): Should we be surfacing this to application code? */
+ } else {
+ /* TODO(klempner): We're currently allowing this, but we shouldn't
+ see it without a proxy so log for now. */
+ char *val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md),
+ GPR_DUMP_ASCII);
+ gpr_log(GPR_INFO, "Unexpected content-type '%s'", val);
+ gpr_free(val);
+ }
+ }
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.content_type);
+ }
+
+ return GRPC_ERROR_NONE;
}
static void hc_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
void *user_data, grpc_error *error) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
- grpc_metadata_batch_filter(exec_ctx, calld->recv_initial_metadata,
- client_recv_filter, elem);
- grpc_closure_run(exec_ctx, calld->on_done_recv_initial_metadata,
- GRPC_ERROR_REF(error));
+ if (error == GRPC_ERROR_NONE) {
+ error = client_filter_incoming_metadata(exec_ctx, elem,
+ calld->recv_initial_metadata);
+ } else {
+ GRPC_ERROR_REF(error);
+ }
+ grpc_closure_run(exec_ctx, calld->on_done_recv_initial_metadata, error);
}
static void hc_on_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
void *user_data, grpc_error *error) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
- grpc_metadata_batch_filter(exec_ctx, calld->recv_trailing_metadata,
- client_recv_filter, elem);
- grpc_closure_run(exec_ctx, calld->on_done_recv_trailing_metadata,
- GRPC_ERROR_REF(error));
+ if (error == GRPC_ERROR_NONE) {
+ error = client_filter_incoming_metadata(exec_ctx, elem,
+ calld->recv_trailing_metadata);
+ } else {
+ GRPC_ERROR_REF(error);
+ }
+ grpc_closure_run(exec_ctx, calld->on_done_recv_trailing_metadata, error);
}
static void hc_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
@@ -179,15 +207,12 @@ static void send_done(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
calld->post_send->cb(exec_ctx, calld->post_send->cb_arg, error);
}
-static grpc_mdelem *client_strip_filter(grpc_exec_ctx *exec_ctx,
- void *user_data, grpc_mdelem *md) {
- /* eat the things we'd like to set ourselves */
- if (md->key == GRPC_MDSTR_METHOD) return NULL;
- if (md->key == GRPC_MDSTR_SCHEME) return NULL;
- if (md->key == GRPC_MDSTR_TE) return NULL;
- if (md->key == GRPC_MDSTR_CONTENT_TYPE) return NULL;
- if (md->key == GRPC_MDSTR_USER_AGENT) return NULL;
- return md;
+static void remove_if_present(grpc_exec_ctx *exec_ctx,
+ grpc_metadata_batch *batch,
+ grpc_metadata_batch_callouts_index idx) {
+ if (batch->idx.array[idx] != NULL) {
+ grpc_metadata_batch_remove(exec_ctx, batch, batch->idx.array[idx]);
+ }
}
static void continue_send_message(grpc_exec_ctx *exec_ctx,
@@ -226,18 +251,20 @@ static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
}
}
-static void hc_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+static grpc_error *hc_mutate_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
+ grpc_error *error;
if (op->send_initial_metadata != NULL) {
/* Decide which HTTP VERB to use. We use GET if the request is marked
cacheable, and the operation contains both initial metadata and send
message, and the payload is below the size threshold, and all the data
for this request is immediately available. */
- grpc_mdelem *method = GRPC_MDELEM_METHOD_POST;
+ grpc_mdelem method = GRPC_MDELEM_METHOD_POST;
if ((op->send_initial_metadata_flags &
GRPC_INITIAL_METADATA_CACHEABLE_REQUEST) &&
op->send_message != NULL &&
@@ -254,7 +281,7 @@ static void hc_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
/* Attempt to read the data from send_message and create a header field. */
- if (method == GRPC_MDELEM_METHOD_GET) {
+ if (grpc_mdelem_eq(method, GRPC_MDELEM_METHOD_GET)) {
/* allocate memory to hold the entire payload */
calld->payload_bytes = gpr_malloc(op->send_message->length);
@@ -267,12 +294,14 @@ static void hc_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
if (calld->send_message_blocked == false) {
/* when all the send_message data is available, then create a MDELEM and
append to headers */
- grpc_mdelem *payload_bin = grpc_mdelem_from_metadata_strings(
+ grpc_mdelem payload_bin = grpc_mdelem_from_slices(
exec_ctx, GRPC_MDSTR_GRPC_PAYLOAD_BIN,
- grpc_mdstr_from_buffer(calld->payload_bytes,
- op->send_message->length));
- grpc_metadata_batch_add_tail(op->send_initial_metadata,
- &calld->payload_bin, payload_bin);
+ grpc_slice_from_copied_buffer((const char *)calld->payload_bytes,
+ op->send_message->length));
+ error =
+ grpc_metadata_batch_add_tail(exec_ctx, op->send_initial_metadata,
+ &calld->payload_bin, payload_bin);
+ if (error != GRPC_ERROR_NONE) return error;
calld->on_complete = op->on_complete;
op->on_complete = &calld->hc_on_complete;
op->send_message = NULL;
@@ -285,21 +314,35 @@ static void hc_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
}
- grpc_metadata_batch_filter(exec_ctx, op->send_initial_metadata,
- client_strip_filter, elem);
+ remove_if_present(exec_ctx, op->send_initial_metadata, GRPC_BATCH_METHOD);
+ remove_if_present(exec_ctx, op->send_initial_metadata, GRPC_BATCH_SCHEME);
+ remove_if_present(exec_ctx, op->send_initial_metadata, GRPC_BATCH_TE);
+ remove_if_present(exec_ctx, op->send_initial_metadata,
+ GRPC_BATCH_CONTENT_TYPE);
+ remove_if_present(exec_ctx, op->send_initial_metadata,
+ GRPC_BATCH_USER_AGENT);
+
/* Send : prefixed headers, which have to be before any application
layer headers. */
- grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->method,
- method);
- grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->scheme,
- channeld->static_scheme);
- grpc_metadata_batch_add_tail(op->send_initial_metadata, &calld->te_trailers,
- GRPC_MDELEM_TE_TRAILERS);
- grpc_metadata_batch_add_tail(
- op->send_initial_metadata, &calld->content_type,
+ error = grpc_metadata_batch_add_head(exec_ctx, op->send_initial_metadata,
+ &calld->method, method);
+ if (error != GRPC_ERROR_NONE) return error;
+ error =
+ grpc_metadata_batch_add_head(exec_ctx, op->send_initial_metadata,
+ &calld->scheme, channeld->static_scheme);
+ if (error != GRPC_ERROR_NONE) return error;
+ error = grpc_metadata_batch_add_tail(exec_ctx, op->send_initial_metadata,
+ &calld->te_trailers,
+ GRPC_MDELEM_TE_TRAILERS);
+ if (error != GRPC_ERROR_NONE) return error;
+ error = grpc_metadata_batch_add_tail(
+ exec_ctx, op->send_initial_metadata, &calld->content_type,
GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC);
- grpc_metadata_batch_add_tail(op->send_initial_metadata, &calld->user_agent,
- GRPC_MDELEM_REF(channeld->user_agent));
+ if (error != GRPC_ERROR_NONE) return error;
+ error = grpc_metadata_batch_add_tail(exec_ctx, op->send_initial_metadata,
+ &calld->user_agent,
+ GRPC_MDELEM_REF(channeld->user_agent));
+ if (error != GRPC_ERROR_NONE) return error;
}
if (op->recv_initial_metadata != NULL) {
@@ -315,6 +358,8 @@ static void hc_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
calld->on_done_recv_trailing_metadata = op->on_complete;
op->on_complete = &calld->hc_on_recv_trailing_metadata;
}
+
+ return GRPC_ERROR_NONE;
}
static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
@@ -322,15 +367,20 @@ static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *op) {
GPR_TIMER_BEGIN("hc_start_transport_op", 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- hc_mutate_op(exec_ctx, elem, op);
- GPR_TIMER_END("hc_start_transport_op", 0);
- call_data *calld = elem->call_data;
- if (op->send_message != NULL && calld->send_message_blocked) {
- /* Don't forward the op. send_message contains slices that aren't ready
- yet. The call will be forwarded by the op_complete of slice read call. */
+ grpc_error *error = hc_mutate_op(exec_ctx, elem, op);
+ if (error != GRPC_ERROR_NONE) {
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
} else {
- grpc_call_next_op(exec_ctx, elem, op);
+ call_data *calld = elem->call_data;
+ if (op->send_message != NULL && calld->send_message_blocked) {
+ /* Don't forward the op. send_message contains slices that aren't ready
+ yet. The call will be forwarded by the op_complete of slice read call.
+ */
+ } else {
+ grpc_call_next_op(exec_ctx, elem, op);
+ }
}
+ GPR_TIMER_END("hc_start_transport_op", 0);
}
/* Constructor for call_data */
@@ -367,18 +417,18 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
}
-static grpc_mdelem *scheme_from_args(const grpc_channel_args *args) {
+static grpc_mdelem scheme_from_args(const grpc_channel_args *args) {
unsigned i;
size_t j;
- grpc_mdelem *valid_schemes[] = {GRPC_MDELEM_SCHEME_HTTP,
- GRPC_MDELEM_SCHEME_HTTPS};
+ grpc_mdelem valid_schemes[] = {GRPC_MDELEM_SCHEME_HTTP,
+ GRPC_MDELEM_SCHEME_HTTPS};
if (args != NULL) {
for (i = 0; i < args->num_args; ++i) {
if (args->args[i].type == GRPC_ARG_STRING &&
strcmp(args->args[i].key, GRPC_ARG_HTTP2_SCHEME) == 0) {
for (j = 0; j < GPR_ARRAY_SIZE(valid_schemes); j++) {
- if (0 == strcmp(grpc_mdstr_as_c_string(valid_schemes[j]->value),
- args->args[i].value.string)) {
+ if (0 == grpc_slice_str_cmp(GRPC_MDVALUE(valid_schemes[j]),
+ args->args[i].value.string)) {
return valid_schemes[j];
}
}
@@ -404,13 +454,13 @@ static size_t max_payload_size_from_args(const grpc_channel_args *args) {
return kMaxPayloadSizeForGet;
}
-static grpc_mdstr *user_agent_from_args(const grpc_channel_args *args,
- const char *transport_name) {
+static grpc_slice user_agent_from_args(const grpc_channel_args *args,
+ const char *transport_name) {
gpr_strvec v;
size_t i;
int is_first = 1;
char *tmp;
- grpc_mdstr *result;
+ grpc_slice result;
gpr_strvec_init(&v);
@@ -448,7 +498,7 @@ static grpc_mdstr *user_agent_from_args(const grpc_channel_args *args,
tmp = gpr_strvec_flatten(&v, NULL);
gpr_strvec_destroy(&v);
- result = grpc_mdstr_from_string(tmp);
+ result = grpc_slice_intern(grpc_slice_from_static_string(tmp));
gpr_free(tmp);
return result;
@@ -464,7 +514,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->static_scheme = scheme_from_args(args->channel_args);
chand->max_payload_size_for_get =
max_payload_size_from_args(args->channel_args);
- chand->user_agent = grpc_mdelem_from_metadata_strings(
+ chand->user_agent = grpc_mdelem_from_slices(
exec_ctx, GRPC_MDSTR_USER_AGENT,
user_agent_from_args(args->channel_args,
args->optional_transport->vtable->name));
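On the send path, http_client_filter replaces its strip-filter callback with remove_if_present calls keyed by batch-callout index. The same idea, condensed into a loop (the helper name strip_http_headers and the owned[] table are invented for illustration; only calls visible in the hunks above are used):

/* Sketch: clear every header this filter owns before re-adding its own
   values, using the batch callout index instead of a filter callback. */
static void strip_http_headers(grpc_exec_ctx *exec_ctx,
                               grpc_metadata_batch *batch) {
  static const grpc_metadata_batch_callouts_index owned[] = {
      GRPC_BATCH_METHOD, GRPC_BATCH_SCHEME, GRPC_BATCH_TE,
      GRPC_BATCH_CONTENT_TYPE, GRPC_BATCH_USER_AGENT};
  for (size_t i = 0; i < GPR_ARRAY_SIZE(owned); i++) {
    if (batch->idx.array[owned[i]] != NULL) {
      grpc_metadata_batch_remove(exec_ctx, batch, batch->idx.array[owned[i]]);
    }
  }
}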
diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c
index f508231238..3f992977c0 100644
--- a/src/core/lib/channel/http_server_filter.c
+++ b/src/core/lib/channel/http_server_filter.c
@@ -39,6 +39,7 @@
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/percent_encoding.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/transport/static_metadata.h"
#define EXPECTED_CONTENT_TYPE "application/grpc"
@@ -47,18 +48,13 @@
extern int grpc_http_trace;
typedef struct call_data {
- uint8_t seen_path;
- uint8_t seen_method;
- uint8_t sent_status;
- uint8_t seen_scheme;
- uint8_t seen_te_trailers;
- uint8_t seen_authority;
- uint8_t seen_payload_bin;
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
+ /* did this request come with payload-bin */
+ bool seen_payload_bin;
/* flag to ensure payload_bin is delivered only once */
- uint8_t payload_bin_delivered;
+ bool payload_bin_delivered;
grpc_metadata_batch *recv_initial_metadata;
bool *recv_idempotent_request;
@@ -83,109 +79,152 @@ typedef struct call_data {
typedef struct channel_data { uint8_t unused; } channel_data;
-static grpc_mdelem *server_filter_outgoing_metadata(grpc_exec_ctx *exec_ctx,
- void *user_data,
- grpc_mdelem *md) {
- if (md->key == GRPC_MDSTR_GRPC_MESSAGE) {
+static grpc_error *server_filter_outgoing_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_metadata_batch *b) {
+ if (b->idx.named.grpc_message != NULL) {
grpc_slice pct_encoded_msg = grpc_percent_encode_slice(
- md->value->slice, grpc_compatible_percent_encoding_unreserved_bytes);
- if (grpc_slice_is_equivalent(pct_encoded_msg, md->value->slice)) {
+ GRPC_MDVALUE(b->idx.named.grpc_message->md),
+ grpc_compatible_percent_encoding_unreserved_bytes);
+ if (grpc_slice_is_equivalent(pct_encoded_msg,
+ GRPC_MDVALUE(b->idx.named.grpc_message->md))) {
grpc_slice_unref_internal(exec_ctx, pct_encoded_msg);
- return md;
} else {
- return grpc_mdelem_from_metadata_strings(
- exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
- grpc_mdstr_from_slice(exec_ctx, pct_encoded_msg));
+ grpc_metadata_batch_set_value(exec_ctx, b->idx.named.grpc_message,
+ pct_encoded_msg);
}
- } else {
- return md;
}
+ return GRPC_ERROR_NONE;
}
-static grpc_mdelem *server_filter(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_mdelem *md) {
- grpc_call_element *elem = user_data;
+static void add_error(const char *error_name, grpc_error **cumulative,
+ grpc_error *new) {
+ if (new == GRPC_ERROR_NONE) return;
+ if (*cumulative == GRPC_ERROR_NONE) {
+ *cumulative = GRPC_ERROR_CREATE(error_name);
+ }
+ *cumulative = grpc_error_add_child(*cumulative, new);
+}
+
+static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_metadata_batch *b) {
call_data *calld = elem->call_data;
+ grpc_error *error = GRPC_ERROR_NONE;
+ static const char *error_name = "Failed processing incoming headers";
- /* Check if it is one of the headers we care about. */
- if (md == GRPC_MDELEM_TE_TRAILERS || md == GRPC_MDELEM_METHOD_POST ||
- md == GRPC_MDELEM_METHOD_PUT || md == GRPC_MDELEM_METHOD_GET ||
- md == GRPC_MDELEM_SCHEME_HTTP || md == GRPC_MDELEM_SCHEME_HTTPS ||
- md == GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC) {
- /* swallow it */
- if (md == GRPC_MDELEM_METHOD_POST) {
- calld->seen_method = 1;
+ if (b->idx.named.method != NULL) {
+ if (grpc_mdelem_eq(b->idx.named.method->md, GRPC_MDELEM_METHOD_POST)) {
*calld->recv_idempotent_request = false;
*calld->recv_cacheable_request = false;
- } else if (md == GRPC_MDELEM_METHOD_PUT) {
- calld->seen_method = 1;
+ } else if (grpc_mdelem_eq(b->idx.named.method->md,
+ GRPC_MDELEM_METHOD_PUT)) {
*calld->recv_idempotent_request = true;
- } else if (md == GRPC_MDELEM_METHOD_GET) {
- calld->seen_method = 1;
+ } else if (grpc_mdelem_eq(b->idx.named.method->md,
+ GRPC_MDELEM_METHOD_GET)) {
*calld->recv_cacheable_request = true;
- } else if (md->key == GRPC_MDSTR_SCHEME) {
- calld->seen_scheme = 1;
- } else if (md == GRPC_MDELEM_TE_TRAILERS) {
- calld->seen_te_trailers = 1;
- }
- /* TODO(klempner): Track that we've seen all the headers we should
- require */
- return NULL;
- } else if (md->key == GRPC_MDSTR_CONTENT_TYPE) {
- const char *value_str = grpc_mdstr_as_c_string(md->value);
- if (strncmp(value_str, EXPECTED_CONTENT_TYPE,
- EXPECTED_CONTENT_TYPE_LENGTH) == 0 &&
- (value_str[EXPECTED_CONTENT_TYPE_LENGTH] == '+' ||
- value_str[EXPECTED_CONTENT_TYPE_LENGTH] == ';')) {
- /* Although the C implementation doesn't (currently) generate them,
- any custom +-suffix is explicitly valid. */
- /* TODO(klempner): We should consider preallocating common values such
- as +proto or +json, or at least stashing them if we see them. */
- /* TODO(klempner): Should we be surfacing this to application code? */
} else {
- /* TODO(klempner): We're currently allowing this, but we shouldn't
- see it without a proxy so log for now. */
- gpr_log(GPR_INFO, "Unexpected content-type '%s'", value_str);
+ add_error(error_name, &error,
+ grpc_attach_md_to_error(GRPC_ERROR_CREATE("Bad header"),
+ b->idx.named.method->md));
+ }
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.method);
+ } else {
+ add_error(error_name, &error,
+ grpc_error_set_str(GRPC_ERROR_CREATE("Missing header"),
+ GRPC_ERROR_STR_KEY, ":method"));
+ }
+
+ if (b->idx.named.te != NULL) {
+ if (!grpc_mdelem_eq(b->idx.named.te->md, GRPC_MDELEM_TE_TRAILERS)) {
+ add_error(error_name, &error,
+ grpc_attach_md_to_error(GRPC_ERROR_CREATE("Bad header"),
+ b->idx.named.te->md));
+ }
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.te);
+ } else {
+ add_error(error_name, &error,
+ grpc_error_set_str(GRPC_ERROR_CREATE("Missing header"),
+ GRPC_ERROR_STR_KEY, "te"));
+ }
+
+ if (b->idx.named.scheme != NULL) {
+ if (!grpc_mdelem_eq(b->idx.named.scheme->md, GRPC_MDELEM_SCHEME_HTTP) &&
+ !grpc_mdelem_eq(b->idx.named.scheme->md, GRPC_MDELEM_SCHEME_HTTPS) &&
+ !grpc_mdelem_eq(b->idx.named.scheme->md, GRPC_MDELEM_SCHEME_GRPC)) {
+ add_error(error_name, &error,
+ grpc_attach_md_to_error(GRPC_ERROR_CREATE("Bad header"),
+ b->idx.named.scheme->md));
}
- return NULL;
- } else if (md->key == GRPC_MDSTR_TE || md->key == GRPC_MDSTR_METHOD ||
- md->key == GRPC_MDSTR_SCHEME) {
- gpr_log(GPR_ERROR, "Invalid %s: header: '%s'",
- grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value));
- /* swallow it and error everything out. */
- /* TODO(klempner): We ought to generate more descriptive error messages
- on the wire here. */
- grpc_call_element_send_cancel(exec_ctx, elem);
- return NULL;
- } else if (md->key == GRPC_MDSTR_PATH) {
- if (calld->seen_path) {
- gpr_log(GPR_ERROR, "Received :path twice");
- return NULL;
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.scheme);
+ } else {
+ add_error(error_name, &error,
+ grpc_error_set_str(GRPC_ERROR_CREATE("Missing header"),
+ GRPC_ERROR_STR_KEY, ":scheme"));
+ }
+
+ if (b->idx.named.content_type != NULL) {
+ if (!grpc_mdelem_eq(b->idx.named.content_type->md,
+ GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC)) {
+ if (grpc_slice_buf_start_eq(GRPC_MDVALUE(b->idx.named.content_type->md),
+ EXPECTED_CONTENT_TYPE,
+ EXPECTED_CONTENT_TYPE_LENGTH) &&
+ (GRPC_SLICE_START_PTR(GRPC_MDVALUE(
+ b->idx.named.content_type->md))[EXPECTED_CONTENT_TYPE_LENGTH] ==
+ '+' ||
+ GRPC_SLICE_START_PTR(GRPC_MDVALUE(
+ b->idx.named.content_type->md))[EXPECTED_CONTENT_TYPE_LENGTH] ==
+ ';')) {
+ /* Although the C implementation doesn't (currently) generate them,
+ any custom +-suffix is explicitly valid. */
+ /* TODO(klempner): We should consider preallocating common values such
+ as +proto or +json, or at least stashing them if we see them. */
+ /* TODO(klempner): Should we be surfacing this to application code? */
+ } else {
+ /* TODO(klempner): We're currently allowing this, but we shouldn't
+ see it without a proxy so log for now. */
+ char *val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md),
+ GPR_DUMP_ASCII);
+ gpr_log(GPR_INFO, "Unexpected content-type '%s'", val);
+ gpr_free(val);
+ }
}
- calld->seen_path = 1;
- return md;
- } else if (md->key == GRPC_MDSTR_AUTHORITY) {
- calld->seen_authority = 1;
- return md;
- } else if (md->key == GRPC_MDSTR_HOST) {
- /* translate host to :authority since :authority may be
- omitted */
- grpc_mdelem *authority = grpc_mdelem_from_metadata_strings(
- exec_ctx, GRPC_MDSTR_AUTHORITY, GRPC_MDSTR_REF(md->value));
- calld->seen_authority = 1;
- return authority;
- } else if (md->key == GRPC_MDSTR_GRPC_PAYLOAD_BIN) {
- /* Retrieve the payload from the value of the 'grpc-internal-payload-bin'
- header field */
- calld->seen_payload_bin = 1;
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.content_type);
+ }
+
+ if (b->idx.named.path == NULL) {
+ add_error(error_name, &error,
+ grpc_error_set_str(GRPC_ERROR_CREATE("Missing header"),
+ GRPC_ERROR_STR_KEY, ":path"));
+ }
+
+ if (b->idx.named.host != NULL) {
+ add_error(
+ error_name, &error,
+ grpc_metadata_batch_substitute(
+ exec_ctx, b, b->idx.named.host,
+ grpc_mdelem_from_slices(
+ exec_ctx, GRPC_MDSTR_AUTHORITY,
+ grpc_slice_ref_internal(GRPC_MDVALUE(b->idx.named.host->md)))));
+ }
+
+ if (b->idx.named.authority == NULL) {
+ add_error(error_name, &error,
+ grpc_error_set_str(GRPC_ERROR_CREATE("Missing header"),
+ GRPC_ERROR_STR_KEY, ":authority"));
+ }
+
+ if (b->idx.named.grpc_payload_bin != NULL) {
+ calld->seen_payload_bin = true;
grpc_slice_buffer_add(&calld->read_slice_buffer,
- grpc_slice_ref_internal(md->value->slice));
+ grpc_slice_ref_internal(
+ GRPC_MDVALUE(b->idx.named.grpc_payload_bin->md)));
grpc_slice_buffer_stream_init(&calld->read_stream,
&calld->read_slice_buffer, 0);
- return NULL;
- } else {
- return md;
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_payload_bin);
}
+
+ return error;
}
static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
@@ -193,49 +232,12 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
if (err == GRPC_ERROR_NONE) {
- grpc_metadata_batch_filter(exec_ctx, calld->recv_initial_metadata,
- server_filter, elem);
- /* Have we seen the required http2 transport headers?
- (:method, :scheme, content-type, with :path and :authority covered
- at the channel level right now) */
- if (calld->seen_method && calld->seen_scheme && calld->seen_te_trailers &&
- calld->seen_path && calld->seen_authority) {
- /* do nothing */
- } else {
- err = GRPC_ERROR_CREATE("Bad incoming HTTP headers");
- if (!calld->seen_path) {
- err = grpc_error_add_child(err,
- GRPC_ERROR_CREATE("Missing :path header"));
- }
- if (!calld->seen_authority) {
- err = grpc_error_add_child(
- err, GRPC_ERROR_CREATE("Missing :authority header"));
- }
- if (!calld->seen_method) {
- err = grpc_error_add_child(err,
- GRPC_ERROR_CREATE("Missing :method header"));
- }
- if (!calld->seen_scheme) {
- err = grpc_error_add_child(err,
- GRPC_ERROR_CREATE("Missing :scheme header"));
- }
- if (!calld->seen_te_trailers) {
- err = grpc_error_add_child(
- err, GRPC_ERROR_CREATE("Missing te: trailers header"));
- }
- /* Error this call out */
- if (grpc_http_trace) {
- const char *error_str = grpc_error_string(err);
- gpr_log(GPR_ERROR, "Invalid http2 headers: %s", error_str);
- grpc_error_free_string(error_str);
- }
- grpc_call_element_send_cancel(exec_ctx, elem);
- }
+ err = server_filter_incoming_metadata(exec_ctx, elem,
+ calld->recv_initial_metadata);
} else {
GRPC_ERROR_REF(err);
}
- calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, err);
- GRPC_ERROR_UNREF(err);
+ grpc_closure_run(exec_ctx, calld->on_done_recv, err);
}
static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
@@ -273,13 +275,23 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
- if (op->send_initial_metadata != NULL && !calld->sent_status) {
- calld->sent_status = 1;
- grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->status,
- GRPC_MDELEM_STATUS_200);
- grpc_metadata_batch_add_tail(
- op->send_initial_metadata, &calld->content_type,
- GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC);
+ if (op->send_initial_metadata != NULL) {
+ grpc_error *error = GRPC_ERROR_NONE;
+ static const char *error_name = "Failed sending initial metadata";
+ add_error(error_name, &error, grpc_metadata_batch_add_head(
+ exec_ctx, op->send_initial_metadata,
+ &calld->status, GRPC_MDELEM_STATUS_200));
+ add_error(error_name, &error,
+ grpc_metadata_batch_add_tail(
+ exec_ctx, op->send_initial_metadata, &calld->content_type,
+ GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC));
+ add_error(error_name, &error,
+ server_filter_outgoing_metadata(exec_ctx, elem,
+ op->send_initial_metadata));
+ if (error != GRPC_ERROR_NONE) {
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
+ return;
+ }
}
if (op->recv_initial_metadata) {
@@ -306,8 +318,12 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
if (op->send_trailing_metadata) {
- grpc_metadata_batch_filter(exec_ctx, op->send_trailing_metadata,
- server_filter_outgoing_metadata, elem);
+ grpc_error *error = server_filter_outgoing_metadata(
+ exec_ctx, elem, op->send_trailing_metadata);
+ if (error != GRPC_ERROR_NONE) {
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
+ return;
+ }
}
}
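http_server_filter's new add_error helper lets header validation report every problem at once: the first real failure creates a named parent error, each later failure is attached as a child, and GRPC_ERROR_NONE contributions are ignored. A minimal sketch of the idiom as the send path above uses it (assuming the surrounding hs_mutate_op context, i.e. op, calld and elem are in scope):

/* Sketch: accumulate failures, then fail the whole op once at the end. */
grpc_error *error = GRPC_ERROR_NONE;
static const char *error_name = "Failed sending initial metadata";
add_error(error_name, &error,
          grpc_metadata_batch_add_head(exec_ctx, op->send_initial_metadata,
                                       &calld->status,
                                       GRPC_MDELEM_STATUS_200));
add_error(error_name, &error,
          server_filter_outgoing_metadata(exec_ctx, elem,
                                          op->send_initial_metadata));
if (error != GRPC_ERROR_NONE) {
  grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
  return;
}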
diff --git a/src/core/lib/channel/message_size_filter.c b/src/core/lib/channel/message_size_filter.c
index 862090b371..d9862c5252 100644
--- a/src/core/lib/channel/message_size_filter.c
+++ b/src/core/lib/channel/message_size_filter.c
@@ -58,7 +58,7 @@ static void message_size_limits_free(grpc_exec_ctx* exec_ctx, void* value) {
gpr_free(value);
}
-static const grpc_mdstr_hash_table_vtable message_size_limits_vtable = {
+static const grpc_slice_hash_table_vtable message_size_limits_vtable = {
message_size_limits_free, message_size_limits_copy};
static void* message_size_limits_create_from_json(const grpc_json* json) {
@@ -101,7 +101,7 @@ typedef struct channel_data {
int max_send_size;
int max_recv_size;
// Maps path names to message_size_limits structs.
- grpc_mdstr_hash_table* method_limit_table;
+ grpc_slice_hash_table* method_limit_table;
} channel_data;
// Callback invoked when we receive a message. Here we check the max
@@ -142,10 +142,12 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
char* message_string;
gpr_asprintf(&message_string, "Sent message larger than max (%u vs. %d)",
op->send_message->length, calld->max_send_size);
- grpc_slice message = grpc_slice_from_copied_string(message_string);
+ grpc_transport_stream_op_finish_with_failure(
+ exec_ctx, op, grpc_error_set_int(GRPC_ERROR_CREATE(message_string),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_INVALID_ARGUMENT));
gpr_free(message_string);
- grpc_call_element_send_close_with_message(
- exec_ctx, elem, GRPC_STATUS_INVALID_ARGUMENT, &message);
+ return;
}
// Inject callback for receiving a message.
if (op->recv_message_ready != NULL) {
@@ -243,7 +245,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_element* elem) {
channel_data* chand = elem->channel_data;
- grpc_mdstr_hash_table_unref(exec_ctx, chand->method_limit_table);
+ grpc_slice_hash_table_unref(exec_ctx, chand->method_limit_table);
}
const grpc_channel_filter grpc_message_size_filter = {
diff --git a/src/core/lib/compression/algorithm_metadata.h b/src/core/lib/compression/algorithm_metadata.h
index 1f9cc15f23..58dfe628b4 100644
--- a/src/core/lib/compression/algorithm_metadata.h
+++ b/src/core/lib/compression/algorithm_metadata.h
@@ -38,16 +38,16 @@
#include "src/core/lib/transport/metadata.h"
/** Return compression algorithm based metadata value */
-grpc_mdstr *grpc_compression_algorithm_mdstr(
+grpc_slice grpc_compression_algorithm_slice(
grpc_compression_algorithm algorithm);
/** Return compression algorithm based metadata element (grpc-encoding: xxx) */
-grpc_mdelem *grpc_compression_encoding_mdelem(
+grpc_mdelem grpc_compression_encoding_mdelem(
grpc_compression_algorithm algorithm);
/** Find compression algorithm based on passed in mdstr - returns
* GRPC_COMPRESS_ALGORITHM_COUNT on failure */
-grpc_compression_algorithm grpc_compression_algorithm_from_mdstr(
- grpc_mdstr *str);
+grpc_compression_algorithm grpc_compression_algorithm_from_slice(
+ grpc_slice str);
#endif /* GRPC_CORE_LIB_COMPRESSION_ALGORITHM_METADATA_H */
diff --git a/src/core/lib/compression/compression.c b/src/core/lib/compression/compression.c
index 54efb5e855..ce4f597af5 100644
--- a/src/core/lib/compression/compression.c
+++ b/src/core/lib/compression/compression.c
@@ -41,30 +41,24 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/transport/static_metadata.h"
-int grpc_compression_algorithm_parse(const char *name, size_t name_length,
+int grpc_compression_algorithm_parse(grpc_slice name,
grpc_compression_algorithm *algorithm) {
/* we use strncmp not only because it's safer (even though in this case it
* doesn't matter, given that we are comparing against string literals, but
* because this way we needn't have "name" nil-terminated (useful for slice
* data, for example) */
- GRPC_API_TRACE(
- "grpc_compression_algorithm_parse("
- "name=%*.*s, name_length=%lu, algorithm=%p)",
- 5, ((int)name_length, (int)name_length, name, (unsigned long)name_length,
- algorithm));
- if (name_length == 0) {
- return 0;
- }
- if (strncmp(name, "identity", name_length) == 0) {
+ if (grpc_slice_eq(name, GRPC_MDSTR_IDENTITY)) {
*algorithm = GRPC_COMPRESS_NONE;
- } else if (strncmp(name, "gzip", name_length) == 0) {
+ return 1;
+ } else if (grpc_slice_eq(name, GRPC_MDSTR_GZIP)) {
*algorithm = GRPC_COMPRESS_GZIP;
- } else if (strncmp(name, "deflate", name_length) == 0) {
+ return 1;
+ } else if (grpc_slice_eq(name, GRPC_MDSTR_DEFLATE)) {
*algorithm = GRPC_COMPRESS_DEFLATE;
+ return 1;
} else {
return 0;
}
- return 1;
}
int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
@@ -87,15 +81,15 @@ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
return 0;
}
-grpc_compression_algorithm grpc_compression_algorithm_from_mdstr(
- grpc_mdstr *str) {
- if (str == GRPC_MDSTR_IDENTITY) return GRPC_COMPRESS_NONE;
- if (str == GRPC_MDSTR_DEFLATE) return GRPC_COMPRESS_DEFLATE;
- if (str == GRPC_MDSTR_GZIP) return GRPC_COMPRESS_GZIP;
+grpc_compression_algorithm grpc_compression_algorithm_from_slice(
+ grpc_slice str) {
+ if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_COMPRESS_NONE;
+ if (grpc_slice_eq(str, GRPC_MDSTR_DEFLATE)) return GRPC_COMPRESS_DEFLATE;
+ if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_COMPRESS_GZIP;
return GRPC_COMPRESS_ALGORITHMS_COUNT;
}
-grpc_mdstr *grpc_compression_algorithm_mdstr(
+grpc_slice grpc_compression_algorithm_slice(
grpc_compression_algorithm algorithm) {
switch (algorithm) {
case GRPC_COMPRESS_NONE:
@@ -105,12 +99,12 @@ grpc_mdstr *grpc_compression_algorithm_mdstr(
case GRPC_COMPRESS_GZIP:
return GRPC_MDSTR_GZIP;
case GRPC_COMPRESS_ALGORITHMS_COUNT:
- return NULL;
+ return grpc_empty_slice();
}
- return NULL;
+ return grpc_empty_slice();
}
-grpc_mdelem *grpc_compression_encoding_mdelem(
+grpc_mdelem grpc_compression_encoding_mdelem(
grpc_compression_algorithm algorithm) {
switch (algorithm) {
case GRPC_COMPRESS_NONE:
@@ -122,7 +116,7 @@ grpc_mdelem *grpc_compression_encoding_mdelem(
default:
break;
}
- return NULL;
+ return GRPC_MDNULL;
}
void grpc_compression_options_init(grpc_compression_options *opts) {
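With metadata strings now represented as grpc_slice, grpc_compression_algorithm_parse takes a slice rather than a (name, length) pair and compares it against the interned GRPC_MDSTR_* values. A minimal usage sketch (the literal "gzip" slice stands in for a value pulled from metadata, e.g. GRPC_MDVALUE(md)):

/* Sketch: parse a compression algorithm from a slice. */
grpc_compression_algorithm algorithm;
grpc_slice name = grpc_slice_from_static_string("gzip");
if (grpc_compression_algorithm_parse(name, &algorithm)) {
  /* algorithm == GRPC_COMPRESS_GZIP */
} else {
  algorithm = GRPC_COMPRESS_NONE; /* unknown or empty name */
}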
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c
index 440817c5a6..f4f6f3c27a 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.c
@@ -156,7 +156,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
if (error != GRPC_ERROR_NONE) {
const char *msg = grpc_error_string(error);
gpr_log(GPR_ERROR, "Secure transport setup failed: %s", msg);
- grpc_error_free_string(msg);
+
c->func(exec_ctx, c->arg, NULL);
} else {
grpc_channel_args_destroy(exec_ctx, args->args);
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c
index ec197c7d82..509c1ff95d 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.c
@@ -34,6 +34,7 @@
#include "src/core/lib/iomgr/closure.h"
#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
#include "src/core/lib/profiling/timers.h"
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index 7ffec29ebb..2510d50b42 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -66,6 +66,7 @@ typedef struct grpc_closure_scheduler_vtable {
grpc_error *error);
void (*sched)(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error);
+ const char *name;
} grpc_closure_scheduler_vtable;
/** Abstract type that can schedule closures for execution */
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index c26a73b2b7..ba6c7087a9 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -86,13 +86,17 @@ static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
grpc_error *error);
static const grpc_closure_scheduler_vtable scheduler_uncovered = {
- combiner_exec_uncovered, combiner_exec_uncovered};
+ combiner_exec_uncovered, combiner_exec_uncovered,
+ "combiner:immediately:uncovered"};
static const grpc_closure_scheduler_vtable scheduler_covered = {
- combiner_exec_covered, combiner_exec_covered};
+ combiner_exec_covered, combiner_exec_covered,
+ "combiner:immediately:covered"};
static const grpc_closure_scheduler_vtable finally_scheduler_uncovered = {
- combiner_finally_exec_uncovered, combiner_finally_exec_uncovered};
+ combiner_finally_exec_uncovered, combiner_finally_exec_uncovered,
+ "combiner:finally:uncovered"};
static const grpc_closure_scheduler_vtable finally_scheduler_covered = {
- combiner_finally_exec_covered, combiner_finally_exec_covered};
+ combiner_finally_exec_covered, combiner_finally_exec_covered,
+ "combiner:finally:covered"};
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
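grpc_closure_scheduler_vtable grows a third field, a human-readable scheduler name, and every static vtable in the tree is updated to supply one. A sketch of defining a scheduler under the new layout (my_run, my_sched and "my_scheduler" are placeholder names for illustration):

/* Sketch: the vtable now ends with a debug name; omitting it would leave
   the name field NULL. */
static void my_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                   grpc_error *error);
static void my_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                     grpc_error *error);

static const grpc_closure_scheduler_vtable my_scheduler_vtable = {
    my_run, my_sched, "my_scheduler"};
static grpc_closure_scheduler my_scheduler = {&my_scheduler_vtable};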
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index f6bb3a0477..dbe5b139f9 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -33,13 +33,10 @@
#include "src/core/lib/iomgr/error.h"
-#include <inttypes.h>
-#include <stdbool.h>
#include <string.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/avl.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
@@ -48,6 +45,7 @@
#include <grpc/support/log_windows.h>
#endif
+#include "src/core/lib/iomgr/error_internal.h"
#include "src/core/lib/profiling/timers.h"
static void destroy_integer(void *key) {}
@@ -128,6 +126,10 @@ static const char *error_int_name(grpc_error_ints key) {
static const char *error_str_name(grpc_error_strs key) {
switch (key) {
+ case GRPC_ERROR_STR_KEY:
+ return "key";
+ case GRPC_ERROR_STR_VALUE:
+ return "value";
case GRPC_ERROR_STR_DESCRIPTION:
return "description";
case GRPC_ERROR_STR_OS_ERROR:
@@ -160,16 +162,7 @@ static const char *error_time_name(grpc_error_times key) {
GPR_UNREACHABLE_CODE(return "unknown");
}
-struct grpc_error {
- gpr_refcount refs;
- gpr_avl ints;
- gpr_avl strs;
- gpr_avl times;
- gpr_avl errs;
- uintptr_t next_err;
-};
-
-static bool is_special(grpc_error *err) {
+bool grpc_error_is_special(grpc_error *err) {
return err == GRPC_ERROR_NONE || err == GRPC_ERROR_OOM ||
err == GRPC_ERROR_CANCELLED;
}
@@ -177,7 +170,7 @@ static bool is_special(grpc_error *err) {
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line,
const char *func) {
- if (is_special(err)) return err;
+ if (grpc_error_is_special(err)) return err;
gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err,
err->refs.count, err->refs.count + 1, file, line, func);
gpr_ref(&err->refs);
@@ -185,25 +178,26 @@ grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line,
}
#else
grpc_error *grpc_error_ref(grpc_error *err) {
- if (is_special(err)) return err;
+ if (grpc_error_is_special(err)) return err;
gpr_ref(&err->refs);
return err;
}
#endif
static void error_destroy(grpc_error *err) {
- GPR_ASSERT(!is_special(err));
+ GPR_ASSERT(!grpc_error_is_special(err));
gpr_avl_unref(err->ints);
gpr_avl_unref(err->strs);
gpr_avl_unref(err->errs);
gpr_avl_unref(err->times);
+ gpr_free((void *)gpr_atm_acq_load(&err->error_string));
gpr_free(err);
}
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
void grpc_error_unref(grpc_error *err, const char *file, int line,
const char *func) {
- if (is_special(err)) return;
+ if (grpc_error_is_special(err)) return;
gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err,
err->refs.count, err->refs.count - 1, file, line, func);
if (gpr_unref(&err->refs)) {
@@ -212,7 +206,7 @@ void grpc_error_unref(grpc_error *err, const char *file, int line,
}
#else
void grpc_error_unref(grpc_error *err) {
- if (is_special(err)) return;
+ if (grpc_error_is_special(err)) return;
if (gpr_unref(&err->refs)) {
error_destroy(err);
}
@@ -247,6 +241,7 @@ grpc_error *grpc_error_create(const char *file, int line, const char *desc,
err->times = gpr_avl_add(gpr_avl_create(&avl_vtable_times),
(void *)(uintptr_t)GRPC_ERROR_TIME_CREATED,
box_time(gpr_now(GPR_CLOCK_REALTIME)));
+ gpr_atm_no_barrier_store(&err->error_string, 0);
gpr_ref_init(&err->refs, 1);
GPR_TIMER_END("grpc_error_create", 0);
return err;
@@ -255,9 +250,10 @@ grpc_error *grpc_error_create(const char *file, int line, const char *desc,
static grpc_error *copy_error_and_unref(grpc_error *in) {
GPR_TIMER_BEGIN("copy_error_and_unref", 0);
grpc_error *out;
- if (is_special(in)) {
+ if (grpc_error_is_special(in)) {
if (in == GRPC_ERROR_NONE)
- out = GRPC_ERROR_CREATE("no error");
+ out = grpc_error_set_int(GRPC_ERROR_CREATE("no error"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_OK);
else if (in == GRPC_ERROR_OOM)
out = GRPC_ERROR_CREATE("oom");
else if (in == GRPC_ERROR_CANCELLED)
@@ -275,6 +271,7 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
out->strs = gpr_avl_ref(in->strs);
out->errs = gpr_avl_ref(in->errs);
out->times = gpr_avl_ref(in->times);
+ gpr_atm_no_barrier_store(&out->error_string, 0);
out->next_err = in->next_err;
gpr_ref_init(&out->refs, 1);
GRPC_ERROR_UNREF(in);
@@ -292,14 +289,29 @@ grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which,
return new;
}
+typedef struct {
+ grpc_error *error;
+ grpc_status_code code;
+ const char *msg;
+} special_error_status_map;
+static special_error_status_map error_status_map[] = {
+ {GRPC_ERROR_NONE, GRPC_STATUS_OK, NULL},
+ {GRPC_ERROR_CANCELLED, GRPC_STATUS_CANCELLED, "Cancelled"},
+ {GRPC_ERROR_OOM, GRPC_STATUS_RESOURCE_EXHAUSTED, "Out of memory"},
+};
+
bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) {
GPR_TIMER_BEGIN("grpc_error_get_int", 0);
void *pp;
- if (is_special(err)) {
- if (err == GRPC_ERROR_CANCELLED && which == GRPC_ERROR_INT_GRPC_STATUS) {
- *p = GRPC_STATUS_CANCELLED;
- GPR_TIMER_END("grpc_error_get_int", 0);
- return true;
+ if (grpc_error_is_special(err)) {
+ if (which == GRPC_ERROR_INT_GRPC_STATUS) {
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(error_status_map); i++) {
+ if (error_status_map[i].error == err) {
+ if (p != NULL) *p = error_status_map[i].code;
+ GPR_TIMER_END("grpc_error_get_int", 0);
+ return true;
+ }
+ }
}
GPR_TIMER_END("grpc_error_get_int", 0);
return false;
@@ -324,66 +336,17 @@ grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
}
const char *grpc_error_get_str(grpc_error *err, grpc_error_strs which) {
- if (is_special(err)) return NULL;
- return gpr_avl_get(err->strs, (void *)(uintptr_t)which);
-}
-
-typedef struct {
- grpc_error *error;
- grpc_status_code code;
- const char *msg;
-} special_error_status_map;
-static special_error_status_map error_status_map[] = {
- {GRPC_ERROR_NONE, GRPC_STATUS_OK, ""},
- {GRPC_ERROR_CANCELLED, GRPC_STATUS_CANCELLED, "RPC cancelled"},
- {GRPC_ERROR_OOM, GRPC_STATUS_RESOURCE_EXHAUSTED, "Out of memory"},
-};
-
-static grpc_error *recursively_find_error_with_status(grpc_error *error,
- intptr_t *status) {
- // If the error itself has a status code, return it.
- if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, status)) {
- return error;
- }
- // Otherwise, search through its children.
- intptr_t key = 0;
- while (true) {
- grpc_error *child_error = gpr_avl_get(error->errs, (void *)key++);
- if (child_error == NULL) break;
- grpc_error *result =
- recursively_find_error_with_status(child_error, status);
- if (result != NULL) return result;
- }
- return NULL;
-}
-
-void grpc_error_get_status(grpc_error *error, grpc_status_code *code,
- const char **msg) {
- // Handle special errors via the static map.
- for (size_t i = 0; i < GPR_ARRAY_SIZE(error_status_map); ++i) {
- if (error == error_status_map[i].error) {
- *code = error_status_map[i].code;
- *msg = error_status_map[i].msg;
- return;
+ if (grpc_error_is_special(err)) {
+ if (which == GRPC_ERROR_STR_GRPC_MESSAGE) {
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(error_status_map); i++) {
+ if (error_status_map[i].error == err) {
+ return error_status_map[i].msg;
+ }
+ }
}
+ return NULL;
}
- // Populate code.
- // Start with the parent error and recurse through the tree of children
- // until we find the first one that has a status code.
- intptr_t status = GRPC_STATUS_UNKNOWN; // Default in case we don't find one.
- grpc_error *found_error = recursively_find_error_with_status(error, &status);
- *code = (grpc_status_code)status;
- // Now populate msg.
- // If we found an error with a status code above, use that; otherwise,
- // fall back to using the parent error.
- if (found_error == NULL) found_error = error;
- // If the error has a status message, use it. Otherwise, fall back to
- // the error description.
- *msg = grpc_error_get_str(found_error, GRPC_ERROR_STR_GRPC_MESSAGE);
- if (*msg == NULL) {
- *msg = grpc_error_get_str(found_error, GRPC_ERROR_STR_DESCRIPTION);
- if (*msg == NULL) *msg = "uknown error"; // Just in case.
- }
+ return gpr_avl_get(err->strs, (void *)(uintptr_t)which);
}
grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child) {
@@ -535,7 +498,6 @@ static void add_errs(gpr_avl_node *n, char **s, size_t *sz, size_t *cap,
*first = false;
const char *e = grpc_error_string(n->value);
append_str(e, s, sz, cap);
- grpc_error_free_string(e);
add_errs(n->right, s, sz, cap, first);
}
@@ -557,7 +519,7 @@ static int cmp_kvs(const void *a, const void *b) {
return strcmp(ka->key, kb->key);
}
-static const char *finish_kvs(kv_pairs *kvs) {
+static char *finish_kvs(kv_pairs *kvs) {
char *s = NULL;
size_t sz = 0;
size_t cap = 0;
@@ -578,19 +540,18 @@ static const char *finish_kvs(kv_pairs *kvs) {
return s;
}
-void grpc_error_free_string(const char *str) {
- if (str == no_error_string) return;
- if (str == oom_error_string) return;
- if (str == cancelled_error_string) return;
- gpr_free((char *)str);
-}
-
const char *grpc_error_string(grpc_error *err) {
GPR_TIMER_BEGIN("grpc_error_string", 0);
if (err == GRPC_ERROR_NONE) return no_error_string;
if (err == GRPC_ERROR_OOM) return oom_error_string;
if (err == GRPC_ERROR_CANCELLED) return cancelled_error_string;
+ void *p = (void *)gpr_atm_acq_load(&err->error_string);
+ if (p != NULL) {
+ GPR_TIMER_END("grpc_error_string", 0);
+ return p;
+ }
+
kv_pairs kvs;
memset(&kvs, 0, sizeof(kvs));
@@ -603,7 +564,13 @@ const char *grpc_error_string(grpc_error *err) {
qsort(kvs.kvs, kvs.num_kvs, sizeof(kv_pair), cmp_kvs);
- const char *out = finish_kvs(&kvs);
+ char *out = finish_kvs(&kvs);
+
+ if (!gpr_atm_rel_cas(&err->error_string, 0, (gpr_atm)out)) {
+ gpr_free(out);
+ out = (char *)gpr_atm_no_barrier_load(&err->error_string);
+ }
+
GPR_TIMER_END("grpc_error_string", 0);
return out;
}
@@ -638,7 +605,6 @@ bool grpc_log_if_error(const char *what, grpc_error *error, const char *file,
if (error == GRPC_ERROR_NONE) return true;
const char *msg = grpc_error_string(error);
gpr_log(file, line, GPR_LOG_SEVERITY_ERROR, "%s: %s", what, msg);
- grpc_error_free_string(msg);
GRPC_ERROR_UNREF(error);
return false;
}
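grpc_error_string now caches its result inside the error (the new error_string atom stored in the struct moved to error_internal.h) and grpc_error_free_string is removed, which is why every call site in this diff simply drops the free. The caller-side pattern becomes (a sketch; error is any grpc_error the caller owns):

/* Sketch: the returned string is owned by the error and stays valid until
   the error's final unref; callers no longer free it. */
const char *msg = grpc_error_string(error);
gpr_log(GPR_ERROR, "operation failed: %s", msg);
GRPC_ERROR_UNREF(error); /* also releases the cached string */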
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index f3f3b80a09..ffacdac393 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -124,7 +124,11 @@ typedef enum {
/// filename that we were trying to read/write when this error occurred
GRPC_ERROR_STR_FILENAME,
/// which data was queued for writing when the error occurred
- GRPC_ERROR_STR_QUEUED_BUFFERS
+ GRPC_ERROR_STR_QUEUED_BUFFERS,
+ /// key associated with the error
+ GRPC_ERROR_STR_KEY,
+ /// value associated with the error
+ GRPC_ERROR_STR_VALUE,
} grpc_error_strs;
typedef enum {
@@ -141,7 +145,6 @@ typedef enum {
#define GRPC_ERROR_CANCELLED ((grpc_error *)4)
const char *grpc_error_string(grpc_error *error);
-void grpc_error_free_string(const char *str);
/// Create an error - but use GRPC_ERROR_CREATE instead
grpc_error *grpc_error_create(const char *file, int line, const char *desc,
@@ -189,12 +192,6 @@ grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
/// Caller does NOT own return value.
const char *grpc_error_get_str(grpc_error *error, grpc_error_strs which);
-/// A utility function to get the status code and message to be returned
-/// to the application. If not set in the top-level message, looks
-/// through child errors until it finds the first one with these attributes.
-void grpc_error_get_status(grpc_error *error, grpc_status_code *code,
- const char **msg);
-
/// Add a child error: an error that is believed to have contributed to this
/// error occurring. Allows root causing high level errors from lower level
/// errors that contributed to them.
diff --git a/src/core/lib/iomgr/error_internal.h b/src/core/lib/iomgr/error_internal.h
new file mode 100644
index 0000000000..1c89ead4ed
--- /dev/null
+++ b/src/core/lib/iomgr/error_internal.h
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_ERROR_INTERNAL_H
+#define GRPC_CORE_LIB_IOMGR_ERROR_INTERNAL_H
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include <grpc/support/avl.h>
+
+struct grpc_error {
+ gpr_refcount refs;
+ gpr_avl ints;
+ gpr_avl strs;
+ gpr_avl times;
+ gpr_avl errs;
+ uintptr_t next_err;
+ gpr_atm error_string;
+};
+
+bool grpc_error_is_special(grpc_error *err);
+
+#endif /* GRPC_CORE_LIB_IOMGR_ERROR_INTERNAL_H */
diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c
index d6664aead2..39b5c0032e 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_linux.c
@@ -321,7 +321,7 @@ gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
- workqueue_enqueue, workqueue_enqueue};
+ workqueue_enqueue, workqueue_enqueue, "workqueue"};
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index 2975d619e1..c106ba5400 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -52,6 +52,8 @@
* tests */
grpc_poll_function_type grpc_poll_function = poll;
+grpc_wakeup_fd grpc_global_wakeup_fd;
+
static const grpc_event_engine_vtable *g_event_engine;
static const char *g_poll_strategy_name = NULL;
diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c
index 6aa788f8e5..83bb436bd0 100644
--- a/src/core/lib/iomgr/exec_ctx.c
+++ b/src/core/lib/iomgr/exec_ctx.c
@@ -42,11 +42,16 @@
#include "src/core/lib/profiling/timers.h"
bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) {
- if (!exec_ctx->cached_ready_to_finish) {
- exec_ctx->cached_ready_to_finish = exec_ctx->check_ready_to_finish(
- exec_ctx, exec_ctx->check_ready_to_finish_arg);
+ if ((exec_ctx->flags & GRPC_EXEC_CTX_FLAG_IS_FINISHED) == 0) {
+ if (exec_ctx->check_ready_to_finish(exec_ctx,
+ exec_ctx->check_ready_to_finish_arg)) {
+ exec_ctx->flags |= GRPC_EXEC_CTX_FLAG_IS_FINISHED;
+ return true;
+ }
+ return false;
+ } else {
+ return true;
}
- return exec_ctx->cached_ready_to_finish;
}
bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
@@ -82,7 +87,7 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
}
void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
- exec_ctx->cached_ready_to_finish = true;
+ exec_ctx->flags |= GRPC_EXEC_CTX_FLAG_IS_FINISHED;
grpc_exec_ctx_flush(exec_ctx);
}
@@ -101,6 +106,6 @@ void grpc_exec_ctx_global_init(void) {}
void grpc_exec_ctx_global_shutdown(void) {}
static const grpc_closure_scheduler_vtable exec_ctx_scheduler_vtable = {
- exec_ctx_run, exec_ctx_sched};
+ exec_ctx_run, exec_ctx_sched, "exec_ctx"};
static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable};
grpc_closure_scheduler *grpc_schedule_on_exec_ctx = &exec_ctx_scheduler;
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index e566f1b3e8..f99a0fee5f 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -43,6 +43,13 @@
typedef struct grpc_workqueue grpc_workqueue;
typedef struct grpc_combiner grpc_combiner;
+/* This exec_ctx is ready to return: either pre-populated, or cached as soon as
+ the finish_check returns true */
+#define GRPC_EXEC_CTX_FLAG_IS_FINISHED 1
+/* The exec_ctx's thread is (potentially) owned by a call or channel: care
+ should be given to not delete said call/channel from this exec_ctx */
+#define GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP 2
+
/** Execution context.
* A bag of data that collects information along a callstack.
* Generally created at public API entry points, and passed down as
@@ -63,36 +70,26 @@ typedef struct grpc_combiner grpc_combiner;
* - Instances are always passed as the first argument to a function that
* takes it, and always as a pointer (grpc_exec_ctx is never copied).
*/
-#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
struct grpc_exec_ctx {
grpc_closure_list closure_list;
/** currently active combiner: updated only via combiner.c */
grpc_combiner *active_combiner;
/** last active combiner in the active combiner list */
grpc_combiner *last_combiner;
- bool cached_ready_to_finish;
+ uintptr_t flags;
void *check_ready_to_finish_arg;
bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
};
/* initializer for grpc_exec_ctx:
prefer to use GRPC_EXEC_CTX_INIT whenever possible */
-#define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
- { GRPC_CLOSURE_LIST_INIT, NULL, NULL, false, finish_check_arg, finish_check }
-#else
-struct grpc_exec_ctx {
- bool cached_ready_to_finish;
- void *check_ready_to_finish_arg;
- bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
-};
-#define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
- { false, finish_check_arg, finish_check }
-#endif
+#define GRPC_EXEC_CTX_INITIALIZER(flags, finish_check, finish_check_arg) \
+ { GRPC_CLOSURE_LIST_INIT, NULL, NULL, flags, finish_check_arg, finish_check }
/* initialize an execution context at the top level of an API call into grpc
(this is safe to use elsewhere, though possibly not as efficient) */
#define GRPC_EXEC_CTX_INIT \
- GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL)
+ GRPC_EXEC_CTX_INITIALIZER(GRPC_EXEC_CTX_FLAG_IS_FINISHED, NULL, NULL)
extern grpc_closure_scheduler *grpc_schedule_on_exec_ctx;
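A minimal sketch (not part of this diff) of the new flag-based initializer as used from an application-thread callback; the callback name is hypothetical, and the pattern mirrors the plugin_credentials.c hunk later in this change:

    static void on_application_callback(void *arg) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INITIALIZER(
          GRPC_EXEC_CTX_FLAG_IS_FINISHED | GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP,
          NULL, NULL);
      /* ... schedule closures against &exec_ctx ... */
      grpc_exec_ctx_finish(&exec_ctx); /* ORs in IS_FINISHED, then flushes */
    }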
diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c
index 852775564f..a5b62aa888 100644
--- a/src/core/lib/iomgr/executor.c
+++ b/src/core/lib/iomgr/executor.c
@@ -158,7 +158,7 @@ void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
gpr_mu_destroy(&g_executor.mu);
}
-static const grpc_closure_scheduler_vtable executor_vtable = {executor_push,
- executor_push};
+static const grpc_closure_scheduler_vtable executor_vtable = {
+ executor_push, executor_push, "executor"};
static grpc_closure_scheduler executor_scheduler = {&executor_vtable};
grpc_closure_scheduler *grpc_executor_scheduler = &executor_scheduler;
diff --git a/src/core/lib/iomgr/load_file.c b/src/core/lib/iomgr/load_file.c
index 217bc5da59..f40c8b28cc 100644
--- a/src/core/lib/iomgr/load_file.c
+++ b/src/core/lib/iomgr/load_file.c
@@ -47,7 +47,7 @@ grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
grpc_slice *output) {
unsigned char *contents = NULL;
size_t contents_size = 0;
- grpc_slice result = gpr_empty_slice();
+ grpc_slice result = grpc_empty_slice();
FILE *file;
size_t bytes_read = 0;
grpc_error *error = GRPC_ERROR_NONE;
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c
index 19a16b34ed..8a7d6cc79e 100644
--- a/src/core/lib/iomgr/resource_quota.c
+++ b/src/core/lib/iomgr/resource_quota.c
@@ -397,11 +397,15 @@ static void ru_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
}
}
+static const grpc_slice_refcount_vtable ru_slice_vtable = {
+ ru_slice_ref, ru_slice_unref, grpc_slice_default_eq_impl,
+ grpc_slice_default_hash_impl};
+
static grpc_slice ru_slice_create(grpc_resource_user *resource_user,
size_t size) {
ru_slice_refcount *rc = gpr_malloc(sizeof(ru_slice_refcount) + size);
- rc->base.ref = ru_slice_ref;
- rc->base.unref = ru_slice_unref;
+ rc->base.vtable = &ru_slice_vtable;
+ rc->base.sub_refcount = &rc->base;
gpr_ref_init(&rc->refs, 1);
rc->resource_user = resource_user;
rc->size = size;
@@ -719,7 +723,7 @@ grpc_resource_user *grpc_resource_user_create(
return resource_user;
}
-grpc_resource_quota *grpc_resource_user_get_quota(
+grpc_resource_quota *grpc_resource_user_quota(
grpc_resource_user *resource_user) {
return resource_user->resource_quota;
}
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index e7ba144188..b9f62cbf83 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -94,13 +94,17 @@ typedef struct grpc_resource_user grpc_resource_user;
grpc_resource_user *grpc_resource_user_create(
grpc_resource_quota *resource_quota, const char *name);
+
+/* Returns a borrowed reference to the underlying resource quota for this
+ resource user. */
+grpc_resource_quota *grpc_resource_user_quota(
+ grpc_resource_user *resource_user);
+
void grpc_resource_user_ref(grpc_resource_user *resource_user);
void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user);
void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user);
-grpc_resource_quota *grpc_resource_user_get_quota(
- grpc_resource_user *resource_user);
/* Allocate from the resource user (and its quota).
If optional_on_done is NULL, then allocate immediately. This may push the
diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c
index 9a77c92016..16b0f4e73c 100644
--- a/src/core/lib/iomgr/tcp_client_posix.c
+++ b/src/core/lib/iomgr/tcp_client_posix.c
@@ -118,7 +118,6 @@ static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
str);
- grpc_error_free_string(str);
}
gpr_mu_lock(&ac->mu);
if (ac->fd != NULL) {
@@ -178,7 +177,6 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_writable: error=%s",
ac->addr_str, str);
- grpc_error_free_string(str);
}
gpr_mu_lock(&ac->mu);
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index ece44978b0..a33e63e845 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -181,7 +181,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
size_t i;
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
- grpc_error_free_string(str);
+
for (i = 0; i < tcp->incoming_buffer->count; i++) {
char *dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
@@ -435,7 +435,6 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
if (grpc_tcp_trace) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str);
- grpc_error_free_string(str);
}
grpc_closure_run(exec_ctx, cb, error);
@@ -485,7 +484,6 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (grpc_tcp_trace) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str);
- grpc_error_free_string(str);
}
grpc_closure_sched(exec_ctx, cb, error);
}
diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c
index dafe851ce8..bd4b9b2df1 100644
--- a/src/core/lib/iomgr/tcp_server_windows.c
+++ b/src/core/lib/iomgr/tcp_server_windows.c
@@ -343,7 +343,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
if (error != GRPC_ERROR_NONE) {
const char *msg = grpc_error_string(error);
gpr_log(GPR_INFO, "Skipping on_accept due to error: %s", msg);
- grpc_error_free_string(msg);
+
gpr_mu_unlock(&sp->server->mu);
return;
}
diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c
index 3ddc79706b..7f4ea49a1c 100644
--- a/src/core/lib/iomgr/tcp_uv.c
+++ b/src/core/lib/iomgr/tcp_uv.c
@@ -48,6 +48,7 @@
#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/iomgr/tcp_uv.h"
+#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
@@ -156,7 +157,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
size_t i;
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
- grpc_error_free_string(str);
+
for (i = 0; i < tcp->read_slices->count; i++) {
char *dump = grpc_dump_slice(tcp->read_slices->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c
index d6e1fe3dcf..a098741b70 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c
@@ -177,7 +177,7 @@ static grpc_error *create_default_creds_from_path(
grpc_auth_json_key key;
grpc_auth_refresh_token token;
grpc_call_credentials *result = NULL;
- grpc_slice creds_data = gpr_empty_slice();
+ grpc_slice creds_data = grpc_empty_slice();
grpc_error *error = GRPC_ERROR_NONE;
if (creds_path == NULL) {
error = GRPC_ERROR_CREATE("creds_path unset");
diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.c b/src/core/lib/security/credentials/plugin/plugin_credentials.c
index f90d7dce83..7bc5dfb403 100644
--- a/src/core/lib/security/credentials/plugin/plugin_credentials.c
+++ b/src/core/lib/security/credentials/plugin/plugin_credentials.c
@@ -42,7 +42,9 @@
#include <grpc/support/sync.h>
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/validate_metadata.h"
typedef struct {
void *user_data;
@@ -63,7 +65,9 @@ static void plugin_md_request_metadata_ready(void *request,
grpc_status_code status,
const char *error_details) {
/* called from application code */
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INITIALIZER(
+ GRPC_EXEC_CTX_FLAG_IS_FINISHED | GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP,
+ NULL, NULL);
grpc_metadata_plugin_request *r = (grpc_metadata_plugin_request *)request;
if (status != GRPC_STATUS_OK) {
if (error_details != NULL) {
@@ -77,13 +81,14 @@ static void plugin_md_request_metadata_ready(void *request,
bool seen_illegal_header = false;
grpc_credentials_md *md_array = NULL;
for (i = 0; i < num_md; i++) {
- if (!grpc_header_key_is_legal(md[i].key, strlen(md[i].key))) {
- gpr_log(GPR_ERROR, "Plugin added invalid metadata key: %s", md[i].key);
+ if (!GRPC_LOG_IF_ERROR("validate_metadata_from_plugin",
+ grpc_validate_header_key_is_legal(md[i].key))) {
seen_illegal_header = true;
break;
- } else if (!grpc_is_binary_header(md[i].key, strlen(md[i].key)) &&
- !grpc_header_nonbin_value_is_legal(md[i].value,
- md[i].value_length)) {
+ } else if (!grpc_is_binary_header(md[i].key) &&
+ !GRPC_LOG_IF_ERROR(
+ "validate_metadata_from_plugin",
+ grpc_validate_header_nonbin_value_is_legal(md[i].value))) {
gpr_log(GPR_ERROR, "Plugin added invalid metadata value.");
seen_illegal_header = true;
break;
@@ -95,9 +100,8 @@ static void plugin_md_request_metadata_ready(void *request,
} else if (num_md > 0) {
md_array = gpr_malloc(num_md * sizeof(grpc_credentials_md));
for (i = 0; i < num_md; i++) {
- md_array[i].key = grpc_slice_from_copied_string(md[i].key);
- md_array[i].value =
- grpc_slice_from_copied_buffer(md[i].value, md[i].value_length);
+ md_array[i].key = grpc_slice_ref_internal(md[i].key);
+ md_array[i].value = grpc_slice_ref_internal(md[i].value);
}
r->cb(&exec_ctx, r->user_data, md_array, num_md, GRPC_CREDENTIALS_OK,
NULL);
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index b7f6fd23e3..cf056e8008 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -45,6 +45,7 @@
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/transport/security_connector.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
@@ -54,8 +55,10 @@
/* We can have a per-call credentials. */
typedef struct {
grpc_call_credentials *creds;
- grpc_mdstr *host;
- grpc_mdstr *method;
+ bool have_host;
+ bool have_method;
+ grpc_slice host;
+ grpc_slice method;
/* pollset{_set} bound to this call; if we need to make external
network requests, they should be done under a pollset added to this
pollset_set so that work can progress when this call wants work to progress
@@ -89,14 +92,12 @@ static void reset_auth_metadata_context(
auth_md_context->channel_auth_context = NULL;
}
-static void bubble_up_error(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_status_code status, const char *error_msg) {
- call_data *calld = elem->call_data;
- gpr_log(GPR_ERROR, "Client side authentication failure: %s", error_msg);
- grpc_slice error_slice = grpc_slice_from_copied_string(error_msg);
- grpc_transport_stream_op_add_close(exec_ctx, &calld->op, status,
- &error_slice);
- grpc_call_next_op(exec_ctx, elem, &calld->op);
+static void add_error(grpc_error **combined, grpc_error *error) {
+ if (error == GRPC_ERROR_NONE) return;
+ if (*combined == GRPC_ERROR_NONE) {
+ *combined = GRPC_ERROR_CREATE("Client auth metadata plugin error");
+ }
+ *combined = grpc_error_add_child(*combined, error);
}
static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
@@ -110,30 +111,37 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_metadata_batch *mdb;
size_t i;
reset_auth_metadata_context(&calld->auth_md_context);
+ grpc_error *error = GRPC_ERROR_NONE;
if (status != GRPC_CREDENTIALS_OK) {
- bubble_up_error(exec_ctx, elem, GRPC_STATUS_UNAUTHENTICATED,
- (error_details != NULL && strlen(error_details) > 0)
- ? error_details
- : "Credentials failed to get metadata.");
- return;
+ error = grpc_error_set_int(
+ GRPC_ERROR_CREATE(error_details != NULL && strlen(error_details) > 0
+ ? error_details
+ : "Credentials failed to get metadata."),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED);
+ } else {
+ GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT);
+ GPR_ASSERT(op->send_initial_metadata != NULL);
+ mdb = op->send_initial_metadata;
+ for (i = 0; i < num_md; i++) {
+ add_error(&error,
+ grpc_metadata_batch_add_tail(
+ exec_ctx, mdb, &calld->md_links[i],
+ grpc_mdelem_from_slices(
+ exec_ctx, grpc_slice_ref_internal(md_elems[i].key),
+ grpc_slice_ref_internal(md_elems[i].value))));
+ }
}
- GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT);
- GPR_ASSERT(op->send_initial_metadata != NULL);
- mdb = op->send_initial_metadata;
- for (i = 0; i < num_md; i++) {
- grpc_metadata_batch_add_tail(
- mdb, &calld->md_links[i],
- grpc_mdelem_from_slices(exec_ctx,
- grpc_slice_ref_internal(md_elems[i].key),
- grpc_slice_ref_internal(md_elems[i].value)));
+ if (error == GRPC_ERROR_NONE) {
+ grpc_call_next_op(exec_ctx, elem, op);
+ } else {
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
}
- grpc_call_next_op(exec_ctx, elem, op);
}
void build_auth_metadata_context(grpc_security_connector *sc,
grpc_auth_context *auth_context,
call_data *calld) {
- char *service = gpr_strdup(grpc_mdstr_as_c_string(calld->method));
+ char *service = grpc_slice_to_c_string(calld->method);
char *last_slash = strrchr(service, '/');
char *method_name = NULL;
char *service_url = NULL;
@@ -149,14 +157,15 @@ void build_auth_metadata_context(grpc_security_connector *sc,
method_name = gpr_strdup(last_slash + 1);
}
if (method_name == NULL) method_name = gpr_strdup("");
+ char *host = grpc_slice_to_c_string(calld->host);
gpr_asprintf(&service_url, "%s://%s%s",
- sc->url_scheme == NULL ? "" : sc->url_scheme,
- grpc_mdstr_as_c_string(calld->host), service);
+ sc->url_scheme == NULL ? "" : sc->url_scheme, host, service);
calld->auth_md_context.service_url = service_url;
calld->auth_md_context.method_name = method_name;
calld->auth_md_context.channel_auth_context =
GRPC_AUTH_CONTEXT_REF(auth_context, "grpc_auth_metadata_context");
gpr_free(service);
+ gpr_free(host);
}
static void send_security_metadata(grpc_exec_ctx *exec_ctx,
@@ -180,8 +189,12 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
calld->creds = grpc_composite_call_credentials_create(channel_call_creds,
ctx->creds, NULL);
if (calld->creds == NULL) {
- bubble_up_error(exec_ctx, elem, GRPC_STATUS_UNAUTHENTICATED,
- "Incompatible credentials set on channel and call.");
+ grpc_transport_stream_op_finish_with_failure(
+ exec_ctx, op,
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE(
+ "Incompatible credentials set on channel and call."),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED));
return;
}
} else {
@@ -207,9 +220,14 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *user_data,
send_security_metadata(exec_ctx, elem, &calld->op);
} else {
char *error_msg;
+ char *host = grpc_slice_to_c_string(calld->host);
gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.",
- grpc_mdstr_as_c_string(calld->host));
- bubble_up_error(exec_ctx, elem, GRPC_STATUS_UNAUTHENTICATED, error_msg);
+ host);
+ gpr_free(host);
+ grpc_call_element_signal_error(
+ exec_ctx, elem, grpc_error_set_int(GRPC_ERROR_CREATE(error_msg),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_UNAUTHENTICATED));
gpr_free(error_msg);
}
}
@@ -247,23 +265,30 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
if (op->send_initial_metadata != NULL) {
for (l = op->send_initial_metadata->list.head; l != NULL; l = l->next) {
- grpc_mdelem *md = l->md;
+ grpc_mdelem md = l->md;
/* Pointer comparison is OK for md_elems created from the same context.
*/
- if (md->key == GRPC_MDSTR_AUTHORITY) {
- if (calld->host != NULL) GRPC_MDSTR_UNREF(exec_ctx, calld->host);
- calld->host = GRPC_MDSTR_REF(md->value);
- } else if (md->key == GRPC_MDSTR_PATH) {
- if (calld->method != NULL) GRPC_MDSTR_UNREF(exec_ctx, calld->method);
- calld->method = GRPC_MDSTR_REF(md->value);
+ if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_AUTHORITY)) {
+ if (calld->have_host) {
+ grpc_slice_unref_internal(exec_ctx, calld->host);
+ }
+ calld->host = grpc_slice_ref_internal(GRPC_MDVALUE(md));
+ calld->have_host = true;
+ } else if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_PATH)) {
+ if (calld->have_method) {
+ grpc_slice_unref_internal(exec_ctx, calld->method);
+ }
+ calld->method = grpc_slice_ref_internal(GRPC_MDVALUE(md));
+ calld->have_method = true;
}
}
- if (calld->host != NULL) {
- const char *call_host = grpc_mdstr_as_c_string(calld->host);
+ if (calld->have_host) {
+ char *call_host = grpc_slice_to_c_string(calld->host);
calld->op = *op; /* Copy op (originates from the caller's stack). */
grpc_channel_security_connector_check_call_host(
exec_ctx, chand->security_connector, call_host, chand->auth_context,
on_host_checked, elem);
+ gpr_free(call_host);
GPR_TIMER_END("auth_start_transport_op", 0);
return; /* early exit */
}
@@ -296,11 +321,11 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
void *ignored) {
call_data *calld = elem->call_data;
grpc_call_credentials_unref(exec_ctx, calld->creds);
- if (calld->host != NULL) {
- GRPC_MDSTR_UNREF(exec_ctx, calld->host);
+ if (calld->have_host) {
+ grpc_slice_unref_internal(exec_ctx, calld->host);
}
- if (calld->method != NULL) {
- GRPC_MDSTR_UNREF(exec_ctx, calld->method);
+ if (calld->have_method) {
+ grpc_slice_unref_internal(exec_ctx, calld->method);
}
reset_auth_metadata_context(&calld->auth_md_context);
}
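A minimal sketch (not part of this diff) of the add_error() aggregation introduced above, assuming an exec_ctx and op are in scope: the first real error creates the parent, and subsequent errors are attached as children.

    grpc_error *combined = GRPC_ERROR_NONE;
    add_error(&combined, GRPC_ERROR_NONE);                 /* ignored */
    add_error(&combined, GRPC_ERROR_CREATE("bad key"));    /* parent + first child */
    add_error(&combined, GRPC_ERROR_CREATE("bad value"));  /* second child */
    if (combined != GRPC_ERROR_NONE) {
      grpc_transport_stream_op_finish_with_failure(exec_ctx, op, combined);
    }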
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index 5aa26e0577..b09127811b 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -601,7 +601,7 @@ static grpc_security_connector_vtable ssl_server_vtable = {
ssl_server_destroy, ssl_server_check_peer};
static grpc_slice compute_default_pem_root_certs_once(void) {
- grpc_slice result = gpr_empty_slice();
+ grpc_slice result = grpc_empty_slice();
/* First try to load the roots from the environment. */
char *default_root_certs_path =
diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c
index 5e75856c7a..37d57d759b 100644
--- a/src/core/lib/security/transport/security_handshaker.c
+++ b/src/core/lib/security/transport/security_handshaker.c
@@ -124,7 +124,7 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
}
const char *msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "Security handshake failed: %s", msg);
- grpc_error_free_string(msg);
+
if (!h->shutdown) {
// TODO(ctiller): It is currently necessary to shutdown endpoints
// before destroying them, even if we know that there are no
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index 5e98ba895d..36e81d6501 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -33,12 +33,13 @@
#include <string.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
#include "src/core/lib/security/context/security_context.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/transport/auth_filters.h"
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
+#include "src/core/lib/slice/slice_internal.h"
typedef struct call_data {
grpc_metadata_batch *recv_initial_metadata;
@@ -67,48 +68,34 @@ static grpc_metadata_array metadata_batch_to_md_array(
grpc_metadata_array_init(&result);
for (l = batch->list.head; l != NULL; l = l->next) {
grpc_metadata *usr_md = NULL;
- grpc_mdelem *md = l->md;
- grpc_mdstr *key = md->key;
- grpc_mdstr *value = md->value;
+ grpc_mdelem md = l->md;
+ grpc_slice key = GRPC_MDKEY(md);
+ grpc_slice value = GRPC_MDVALUE(md);
if (result.count == result.capacity) {
result.capacity = GPR_MAX(result.capacity + 8, result.capacity * 2);
result.metadata =
gpr_realloc(result.metadata, result.capacity * sizeof(grpc_metadata));
}
usr_md = &result.metadata[result.count++];
- usr_md->key = grpc_mdstr_as_c_string(key);
- usr_md->value = grpc_mdstr_as_c_string(value);
- usr_md->value_length = GRPC_SLICE_LENGTH(value->slice);
+ usr_md->key = grpc_slice_ref_internal(key);
+ usr_md->value = grpc_slice_ref_internal(value);
}
return result;
}
-static grpc_mdelem *remove_consumed_md(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_mdelem *md) {
+static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx,
+ void *user_data,
+ grpc_mdelem md) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
size_t i;
for (i = 0; i < calld->num_consumed_md; i++) {
const grpc_metadata *consumed_md = &calld->consumed_md[i];
- /* Maybe we could do a pointer comparison but we do not have any guarantee
- that the metadata processor used the same pointers for consumed_md in the
- callback. */
- if (GRPC_SLICE_LENGTH(md->key->slice) != strlen(consumed_md->key) ||
- GRPC_SLICE_LENGTH(md->value->slice) != consumed_md->value_length) {
- continue;
- }
- if (memcmp(GRPC_SLICE_START_PTR(md->key->slice), consumed_md->key,
- GRPC_SLICE_LENGTH(md->key->slice)) == 0 &&
- memcmp(GRPC_SLICE_START_PTR(md->value->slice), consumed_md->value,
- GRPC_SLICE_LENGTH(md->value->slice)) == 0) {
- return NULL; /* Delete. */
- }
+ if (grpc_slice_eq(GRPC_MDKEY(md), consumed_md->key) &&
+ grpc_slice_eq(GRPC_MDVALUE(md), consumed_md->value))
+ return GRPC_FILTERED_REMOVE();
}
- return md;
-}
-
-static void destroy_op(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- gpr_free(arg);
+ return GRPC_FILTERED_MDELEM(md);
}
/* called from application code */
@@ -130,29 +117,33 @@ static void on_md_processing_done(
if (status == GRPC_STATUS_OK) {
calld->consumed_md = consumed_md;
calld->num_consumed_md = num_consumed_md;
- grpc_metadata_batch_filter(&exec_ctx, calld->recv_initial_metadata,
- remove_consumed_md, elem);
+ /* TODO(ctiller): propagate error */
+ GRPC_LOG_IF_ERROR(
+ "grpc_metadata_batch_filter",
+ grpc_metadata_batch_filter(&exec_ctx, calld->recv_initial_metadata,
+ remove_consumed_md, elem,
+ "Response metadata filtering error"));
+ for (size_t i = 0; i < calld->md.count; i++) {
+ grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key);
+ grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value);
+ }
grpc_metadata_array_destroy(&calld->md);
grpc_closure_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE);
} else {
- grpc_slice message;
- grpc_transport_stream_op *close_op = gpr_malloc(sizeof(*close_op));
- memset(close_op, 0, sizeof(*close_op));
+ for (size_t i = 0; i < calld->md.count; i++) {
+ grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key);
+ grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value);
+ }
grpc_metadata_array_destroy(&calld->md);
error_details = error_details != NULL
? error_details
: "Authentication metadata processing failed.";
- message = grpc_slice_from_copied_string(error_details);
calld->transport_op->send_initial_metadata = NULL;
if (calld->transport_op->send_message != NULL) {
grpc_byte_stream_destroy(&exec_ctx, calld->transport_op->send_message);
calld->transport_op->send_message = NULL;
}
calld->transport_op->send_trailing_metadata = NULL;
- close_op->on_complete =
- grpc_closure_create(destroy_op, close_op, grpc_schedule_on_exec_ctx);
- grpc_transport_stream_op_add_close(&exec_ctx, close_op, status, &message);
- grpc_call_next_op(&exec_ctx, elem, close_op);
grpc_closure_sched(&exec_ctx, calld->on_done_recv,
grpc_error_set_int(GRPC_ERROR_CREATE(error_details),
GRPC_ERROR_INT_GRPC_STATUS, status));
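A minimal sketch (not part of this diff) of the grpc_filtered_mdelem callback shape used by remove_consumed_md above; the callback name and the choice of GRPC_MDSTR_USER_AGENT are illustrative:

    static grpc_filtered_mdelem drop_user_agent(grpc_exec_ctx *exec_ctx,
                                                void *user_data, grpc_mdelem md) {
      if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_USER_AGENT)) {
        return GRPC_FILTERED_REMOVE(); /* drop this element from the batch */
      }
      return GRPC_FILTERED_MDELEM(md); /* keep it unchanged */
    }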
diff --git a/src/core/lib/security/util/b64.c b/src/core/lib/security/util/b64.c
index bbd7e335a6..09c8213131 100644
--- a/src/core/lib/security/util/b64.c
+++ b/src/core/lib/security/util/b64.c
@@ -232,5 +232,5 @@ grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx *exec_ctx, const char *b64,
fail:
grpc_slice_unref_internal(exec_ctx, result);
- return gpr_empty_slice();
+ return grpc_empty_slice();
}
diff --git a/src/core/lib/slice/slice.c b/src/core/lib/slice/slice.c
index 76118102ec..1cddf062cd 100644
--- a/src/core/lib/slice/slice.c
+++ b/src/core/lib/slice/slice.c
@@ -41,23 +41,30 @@
#include "src/core/lib/iomgr/exec_ctx.h"
-grpc_slice gpr_empty_slice(void) {
+char *grpc_slice_to_c_string(grpc_slice slice) {
+ char *out = gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1);
+ memcpy(out, GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice));
+ out[GRPC_SLICE_LENGTH(slice)] = 0;
+ return out;
+}
+
+grpc_slice grpc_empty_slice(void) {
grpc_slice out;
- out.refcount = 0;
+ out.refcount = NULL;
out.data.inlined.length = 0;
return out;
}
grpc_slice grpc_slice_ref_internal(grpc_slice slice) {
if (slice.refcount) {
- slice.refcount->ref(slice.refcount);
+ slice.refcount->vtable->ref(slice.refcount);
}
return slice;
}
void grpc_slice_unref_internal(grpc_exec_ctx *exec_ctx, grpc_slice slice) {
if (slice.refcount) {
- slice.refcount->unref(exec_ctx, slice.refcount);
+ slice.refcount->vtable->unref(exec_ctx, slice.refcount);
}
}
@@ -78,16 +85,24 @@ void grpc_slice_unref(grpc_slice slice) {
static void noop_ref(void *unused) {}
static void noop_unref(grpc_exec_ctx *exec_ctx, void *unused) {}
-static grpc_slice_refcount noop_refcount = {noop_ref, noop_unref};
+static const grpc_slice_refcount_vtable noop_refcount_vtable = {
+ noop_ref, noop_unref, grpc_slice_default_eq_impl,
+ grpc_slice_default_hash_impl};
+static grpc_slice_refcount noop_refcount = {&noop_refcount_vtable,
+ &noop_refcount};
-grpc_slice grpc_slice_from_static_string(const char *s) {
+grpc_slice grpc_slice_from_static_buffer(const void *s, size_t len) {
grpc_slice slice;
slice.refcount = &noop_refcount;
slice.data.refcounted.bytes = (uint8_t *)s;
- slice.data.refcounted.length = strlen(s);
+ slice.data.refcounted.length = len;
return slice;
}
+grpc_slice grpc_slice_from_static_string(const char *s) {
+ return grpc_slice_from_static_buffer(s, strlen(s));
+}
+
/* grpc_slice_new support structures - we create a refcount object extended
with the user provided data pointer & destroy function */
typedef struct new_slice_refcount {
@@ -110,14 +125,18 @@ static void new_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
}
}
+static const grpc_slice_refcount_vtable new_slice_vtable = {
+ new_slice_ref, new_slice_unref, grpc_slice_default_eq_impl,
+ grpc_slice_default_hash_impl};
+
grpc_slice grpc_slice_new_with_user_data(void *p, size_t len,
void (*destroy)(void *),
void *user_data) {
grpc_slice slice;
new_slice_refcount *rc = gpr_malloc(sizeof(new_slice_refcount));
gpr_ref_init(&rc->refs, 1);
- rc->rc.ref = new_slice_ref;
- rc->rc.unref = new_slice_unref;
+ rc->rc.vtable = &new_slice_vtable;
+ rc->rc.sub_refcount = &rc->rc;
rc->user_destroy = destroy;
rc->user_data = user_data;
@@ -155,14 +174,18 @@ static void new_with_len_unref(grpc_exec_ctx *exec_ctx, void *p) {
}
}
+static const grpc_slice_refcount_vtable new_with_len_vtable = {
+ new_with_len_ref, new_with_len_unref, grpc_slice_default_eq_impl,
+ grpc_slice_default_hash_impl};
+
grpc_slice grpc_slice_new_with_len(void *p, size_t len,
void (*destroy)(void *, size_t)) {
grpc_slice slice;
new_with_len_slice_refcount *rc =
gpr_malloc(sizeof(new_with_len_slice_refcount));
gpr_ref_init(&rc->refs, 1);
- rc->rc.ref = new_with_len_ref;
- rc->rc.unref = new_with_len_unref;
+ rc->rc.vtable = &new_with_len_vtable;
+ rc->rc.sub_refcount = &rc->rc;
rc->user_destroy = destroy;
rc->user_data = p;
rc->user_length = len;
@@ -200,6 +223,10 @@ static void malloc_unref(grpc_exec_ctx *exec_ctx, void *p) {
}
}
+static const grpc_slice_refcount_vtable malloc_vtable = {
+ malloc_ref, malloc_unref, grpc_slice_default_eq_impl,
+ grpc_slice_default_hash_impl};
+
grpc_slice grpc_slice_malloc(size_t length) {
grpc_slice slice;
@@ -219,8 +246,8 @@ grpc_slice grpc_slice_malloc(size_t length) {
this reference. */
gpr_ref_init(&rc->refs, 1);
- rc->base.ref = malloc_ref;
- rc->base.unref = malloc_unref;
+ rc->base.vtable = &malloc_vtable;
+ rc->base.sub_refcount = &rc->base;
/* Build up the slice to be returned. */
/* The slices refcount points back to the allocated block. */
@@ -247,7 +274,7 @@ grpc_slice grpc_slice_sub_no_ref(grpc_slice source, size_t begin, size_t end) {
GPR_ASSERT(source.data.refcounted.length >= end);
/* Build the result */
- subset.refcount = source.refcount;
+ subset.refcount = source.refcount->sub_refcount;
/* Point into the source array */
subset.data.refcounted.bytes = source.data.refcounted.bytes + begin;
subset.data.refcounted.length = end - begin;
@@ -273,7 +300,7 @@ grpc_slice grpc_slice_sub(grpc_slice source, size_t begin, size_t end) {
} else {
subset = grpc_slice_sub_no_ref(source, begin, end);
/* Bump the refcount */
- subset.refcount->ref(subset.refcount);
+ subset.refcount->vtable->ref(subset.refcount);
}
return subset;
}
@@ -300,13 +327,14 @@ grpc_slice grpc_slice_split_tail(grpc_slice *source, size_t split) {
tail_length);
} else {
/* Build the result */
- tail.refcount = source->refcount;
+ tail.refcount = source->refcount->sub_refcount;
/* Bump the refcount */
- tail.refcount->ref(tail.refcount);
+ tail.refcount->vtable->ref(tail.refcount);
/* Point into the source array */
tail.data.refcounted.bytes = source->data.refcounted.bytes + split;
tail.data.refcounted.length = tail_length;
}
+ source->refcount = source->refcount->sub_refcount;
source->data.refcounted.length = split;
}
@@ -332,18 +360,20 @@ grpc_slice grpc_slice_split_head(grpc_slice *source, size_t split) {
head.refcount = NULL;
head.data.inlined.length = (uint8_t)split;
memcpy(head.data.inlined.bytes, source->data.refcounted.bytes, split);
+ source->refcount = source->refcount->sub_refcount;
source->data.refcounted.bytes += split;
source->data.refcounted.length -= split;
} else {
GPR_ASSERT(source->data.refcounted.length >= split);
/* Build the result */
- head.refcount = source->refcount;
+ head.refcount = source->refcount->sub_refcount;
/* Bump the refcount */
- head.refcount->ref(head.refcount);
+ head.refcount->vtable->ref(head.refcount);
/* Point into the source array */
head.data.refcounted.bytes = source->data.refcounted.bytes;
head.data.refcounted.length = split;
+ source->refcount = source->refcount->sub_refcount;
source->data.refcounted.bytes += split;
source->data.refcounted.length -= split;
}
@@ -351,6 +381,19 @@ grpc_slice grpc_slice_split_head(grpc_slice *source, size_t split) {
return head;
}
+int grpc_slice_default_eq_impl(grpc_slice a, grpc_slice b) {
+ return GRPC_SLICE_LENGTH(a) == GRPC_SLICE_LENGTH(b) &&
+ 0 == memcmp(GRPC_SLICE_START_PTR(a), GRPC_SLICE_START_PTR(b),
+ GRPC_SLICE_LENGTH(a));
+}
+
+int grpc_slice_eq(grpc_slice a, grpc_slice b) {
+ if (a.refcount && b.refcount && a.refcount->vtable == b.refcount->vtable) {
+ return a.refcount->vtable->eq(a, b);
+ }
+ return grpc_slice_default_eq_impl(a, b);
+}
+
int grpc_slice_cmp(grpc_slice a, grpc_slice b) {
int d = (int)(GRPC_SLICE_LENGTH(a) - GRPC_SLICE_LENGTH(b));
if (d != 0) return d;
@@ -367,8 +410,55 @@ int grpc_slice_str_cmp(grpc_slice a, const char *b) {
int grpc_slice_is_equivalent(grpc_slice a, grpc_slice b) {
if (a.refcount == NULL || b.refcount == NULL) {
- return grpc_slice_cmp(a, b) == 0;
+ return grpc_slice_eq(a, b);
}
return a.data.refcounted.length == b.data.refcounted.length &&
a.data.refcounted.bytes == b.data.refcounted.bytes;
}
+
+int grpc_slice_buf_start_eq(grpc_slice a, const void *b, size_t len) {
+ if (GRPC_SLICE_LENGTH(a) < len) return 0;
+ return 0 == memcmp(GRPC_SLICE_START_PTR(a), b, len);
+}
+
+int grpc_slice_rchr(grpc_slice s, char c) {
+ const char *b = (const char *)GRPC_SLICE_START_PTR(s);
+ int i;
+ for (i = (int)GRPC_SLICE_LENGTH(s) - 1; i != -1 && b[i] != c; i--)
+ ;
+ return i;
+}
+
+int grpc_slice_chr(grpc_slice s, char c) {
+ const char *b = (const char *)GRPC_SLICE_START_PTR(s);
+ const char *p = memchr(b, c, GRPC_SLICE_LENGTH(s));
+ return p == NULL ? -1 : (int)(p - b);
+}
+
+int grpc_slice_slice(grpc_slice haystack, grpc_slice needle) {
+ size_t haystack_len = GRPC_SLICE_LENGTH(haystack);
+ const uint8_t *haystack_bytes = GRPC_SLICE_START_PTR(haystack);
+ size_t needle_len = GRPC_SLICE_LENGTH(needle);
+ const uint8_t *needle_bytes = GRPC_SLICE_START_PTR(needle);
+
+ if (haystack_len == 0 || needle_len == 0) return -1;
+ if (haystack_len < needle_len) return -1;
+ if (haystack_len == needle_len)
+ return grpc_slice_eq(haystack, needle) ? 0 : -1;
+ if (needle_len == 1) return grpc_slice_chr(haystack, (char)*needle_bytes);
+
+ const uint8_t *last = haystack_bytes + haystack_len - needle_len;
+ for (const uint8_t *cur = haystack_bytes; cur <= last; ++cur) {
+ if (0 == memcmp(cur, needle_bytes, needle_len)) {
+ return (int)(cur - haystack_bytes);
+ }
+ }
+ return -1;
+}
+
+grpc_slice grpc_slice_dup(grpc_slice a) {
+ grpc_slice copy = grpc_slice_malloc(GRPC_SLICE_LENGTH(a));
+ memcpy(GRPC_SLICE_START_PTR(copy), GRPC_SLICE_START_PTR(a),
+ GRPC_SLICE_LENGTH(a));
+ return copy;
+}
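A minimal usage sketch (not part of this diff) of the slice helpers added above; the string literals are arbitrary examples:

    grpc_slice haystack = grpc_slice_from_static_string("content-type");
    grpc_slice needle = grpc_slice_from_static_string("tent");
    int pos = grpc_slice_slice(haystack, needle); /* 3: offset of "tent" */
    int dash = grpc_slice_chr(haystack, '-');     /* 7: first '-' */
    grpc_slice copy = grpc_slice_dup(haystack);   /* independent copy of the bytes */
    GPR_ASSERT(grpc_slice_eq(copy, haystack));
    grpc_slice_unref(copy);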
diff --git a/src/core/lib/transport/mdstr_hash_table.c b/src/core/lib/slice/slice_hash_table.c
index 2791bf653b..46f807f4a5 100644
--- a/src/core/lib/transport/mdstr_hash_table.c
+++ b/src/core/lib/slice/slice_hash_table.c
@@ -29,7 +29,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-#include "src/core/lib/transport/mdstr_hash_table.h"
+#include "src/core/lib/slice/slice_hash_table.h"
#include <stdbool.h>
#include <string.h>
@@ -37,70 +37,79 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/metadata.h"
-struct grpc_mdstr_hash_table {
+struct grpc_slice_hash_table {
gpr_refcount refs;
size_t size;
- grpc_mdstr_hash_table_entry* entries;
+ grpc_slice_hash_table_entry* entries;
};
+static bool is_empty(grpc_slice_hash_table_entry* entry) {
+ return entry->vtable == NULL;
+}
+
// Helper function for insert and get operations that performs quadratic
// probing (https://en.wikipedia.org/wiki/Quadratic_probing).
-static size_t grpc_mdstr_hash_table_find_index(
- const grpc_mdstr_hash_table* table, const grpc_mdstr* key,
- bool find_empty) {
+static size_t grpc_slice_hash_table_find_index(
+ const grpc_slice_hash_table* table, const grpc_slice key, bool find_empty) {
+ size_t hash = grpc_slice_hash(key);
for (size_t i = 0; i < table->size; ++i) {
- const size_t idx = (key->hash + i * i) % table->size;
- if (table->entries[idx].key == NULL) return find_empty ? idx : table->size;
- if (table->entries[idx].key == key) return idx;
+ const size_t idx = (hash + i * i) % table->size;
+ if (is_empty(&table->entries[idx])) {
+ return find_empty ? idx : table->size;
+ }
+ if (grpc_slice_eq(table->entries[idx].key, key)) {
+ return idx;
+ }
}
return table->size; // Not found.
}
-static void grpc_mdstr_hash_table_add(
- grpc_mdstr_hash_table* table, grpc_mdstr* key, void* value,
- const grpc_mdstr_hash_table_vtable* vtable) {
+static void grpc_slice_hash_table_add(
+ grpc_slice_hash_table* table, grpc_slice key, void* value,
+ const grpc_slice_hash_table_vtable* vtable) {
GPR_ASSERT(value != NULL);
const size_t idx =
- grpc_mdstr_hash_table_find_index(table, key, true /* find_empty */);
+ grpc_slice_hash_table_find_index(table, key, true /* find_empty */);
GPR_ASSERT(idx != table->size); // Table should never be full.
- grpc_mdstr_hash_table_entry* entry = &table->entries[idx];
- entry->key = GRPC_MDSTR_REF(key);
+ grpc_slice_hash_table_entry* entry = &table->entries[idx];
+ entry->key = grpc_slice_ref_internal(key);
entry->value = vtable->copy_value(value);
entry->vtable = vtable;
}
-grpc_mdstr_hash_table* grpc_mdstr_hash_table_create(
- size_t num_entries, grpc_mdstr_hash_table_entry* entries) {
- grpc_mdstr_hash_table* table = gpr_malloc(sizeof(*table));
+grpc_slice_hash_table* grpc_slice_hash_table_create(
+ size_t num_entries, grpc_slice_hash_table_entry* entries) {
+ grpc_slice_hash_table* table = gpr_malloc(sizeof(*table));
memset(table, 0, sizeof(*table));
gpr_ref_init(&table->refs, 1);
// Quadratic probing gets best performance when the table is no more
// than half full.
table->size = num_entries * 2;
- const size_t entry_size = sizeof(grpc_mdstr_hash_table_entry) * table->size;
+ const size_t entry_size = sizeof(grpc_slice_hash_table_entry) * table->size;
table->entries = gpr_malloc(entry_size);
memset(table->entries, 0, entry_size);
for (size_t i = 0; i < num_entries; ++i) {
- grpc_mdstr_hash_table_entry* entry = &entries[i];
- grpc_mdstr_hash_table_add(table, entry->key, entry->value, entry->vtable);
+ grpc_slice_hash_table_entry* entry = &entries[i];
+ grpc_slice_hash_table_add(table, entry->key, entry->value, entry->vtable);
}
return table;
}
-grpc_mdstr_hash_table* grpc_mdstr_hash_table_ref(grpc_mdstr_hash_table* table) {
+grpc_slice_hash_table* grpc_slice_hash_table_ref(grpc_slice_hash_table* table) {
if (table != NULL) gpr_ref(&table->refs);
return table;
}
-void grpc_mdstr_hash_table_unref(grpc_exec_ctx* exec_ctx,
- grpc_mdstr_hash_table* table) {
+void grpc_slice_hash_table_unref(grpc_exec_ctx* exec_ctx,
+ grpc_slice_hash_table* table) {
if (table != NULL && gpr_unref(&table->refs)) {
for (size_t i = 0; i < table->size; ++i) {
- grpc_mdstr_hash_table_entry* entry = &table->entries[i];
- if (entry->key != NULL) {
- GRPC_MDSTR_UNREF(exec_ctx, entry->key);
+ grpc_slice_hash_table_entry* entry = &table->entries[i];
+ if (!is_empty(entry)) {
+ grpc_slice_unref_internal(exec_ctx, entry->key);
entry->vtable->destroy_value(exec_ctx, entry->value);
}
}
@@ -109,10 +118,10 @@ void grpc_mdstr_hash_table_unref(grpc_exec_ctx* exec_ctx,
}
}
-void* grpc_mdstr_hash_table_get(const grpc_mdstr_hash_table* table,
- const grpc_mdstr* key) {
+void* grpc_slice_hash_table_get(const grpc_slice_hash_table* table,
+ const grpc_slice key) {
const size_t idx =
- grpc_mdstr_hash_table_find_index(table, key, false /* find_empty */);
+ grpc_slice_hash_table_find_index(table, key, false /* find_empty */);
if (idx == table->size) return NULL; // Not found.
return table->entries[idx].value;
}
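A minimal sketch (not part of this diff) of building and querying the renamed table; copy_config, destroy_config, config_vtable and the entry contents are hypothetical:

    static void *copy_config(void *v) { return gpr_strdup(v); }
    static void destroy_config(grpc_exec_ctx *exec_ctx, void *v) { gpr_free(v); }
    static const grpc_slice_hash_table_vtable config_vtable = {destroy_config,
                                                               copy_config};

    static void lookup_example(grpc_exec_ctx *exec_ctx) {
      grpc_slice_hash_table_entry entries[] = {
          {grpc_slice_from_static_string("/pkg.Service/Method"), "config-a",
           &config_vtable},
      };
      grpc_slice_hash_table *table =
          grpc_slice_hash_table_create(GPR_ARRAY_SIZE(entries), entries);
      void *cfg = grpc_slice_hash_table_get(
          table, grpc_slice_from_static_string("/pkg.Service/Method"));
      /* cfg points at the copied "config-a" value (NULL if the key is absent) */
      grpc_slice_hash_table_unref(exec_ctx, table);
    }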
diff --git a/src/core/lib/transport/mdstr_hash_table.h b/src/core/lib/slice/slice_hash_table.h
index 57f497ee27..d0c27122d7 100644
--- a/src/core/lib/transport/mdstr_hash_table.h
+++ b/src/core/lib/slice/slice_hash_table.h
@@ -29,8 +29,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef GRPC_CORE_LIB_TRANSPORT_MDSTR_HASH_TABLE_H
-#define GRPC_CORE_LIB_TRANSPORT_MDSTR_HASH_TABLE_H
+#ifndef GRPC_CORE_LIB_SLICE_SLICE_HASH_TABLE_H
+#define GRPC_CORE_LIB_SLICE_SLICE_HASH_TABLE_H
#include "src/core/lib/transport/metadata.h"
@@ -40,38 +40,38 @@
* (https://en.wikipedia.org/wiki/Open_addressing) with quadratic
* probing (https://en.wikipedia.org/wiki/Quadratic_probing).
*
- * The keys are \a grpc_mdstr objects. The values are arbitrary pointers
+ * The keys are \a grpc_slice objects. The values are arbitrary pointers
* with a common vtable.
*
* Hash tables are intentionally immutable, to avoid the need for locking.
*/
-typedef struct grpc_mdstr_hash_table grpc_mdstr_hash_table;
+typedef struct grpc_slice_hash_table grpc_slice_hash_table;
-typedef struct grpc_mdstr_hash_table_vtable {
- void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value);
- void* (*copy_value)(void* value);
-} grpc_mdstr_hash_table_vtable;
+typedef struct grpc_slice_hash_table_vtable {
+ void (*destroy_value)(grpc_exec_ctx *exec_ctx, void *value);
+ void *(*copy_value)(void *value);
+} grpc_slice_hash_table_vtable;
-typedef struct grpc_mdstr_hash_table_entry {
- grpc_mdstr* key;
- void* value; /* Must not be NULL. */
- const grpc_mdstr_hash_table_vtable* vtable;
-} grpc_mdstr_hash_table_entry;
+typedef struct grpc_slice_hash_table_entry {
+ grpc_slice key;
+ void *value; /* Must not be NULL. */
+ const grpc_slice_hash_table_vtable *vtable;
+} grpc_slice_hash_table_entry;
/** Creates a new hash table containing \a entries, which is an array
of length \a num_entries.
Creates its own copy of all keys and values from \a entries. */
-grpc_mdstr_hash_table* grpc_mdstr_hash_table_create(
- size_t num_entries, grpc_mdstr_hash_table_entry* entries);
+grpc_slice_hash_table *grpc_slice_hash_table_create(
+ size_t num_entries, grpc_slice_hash_table_entry *entries);
-grpc_mdstr_hash_table* grpc_mdstr_hash_table_ref(grpc_mdstr_hash_table* table);
-void grpc_mdstr_hash_table_unref(grpc_exec_ctx* exec_ctx,
- grpc_mdstr_hash_table* table);
+grpc_slice_hash_table *grpc_slice_hash_table_ref(grpc_slice_hash_table *table);
+void grpc_slice_hash_table_unref(grpc_exec_ctx *exec_ctx,
+ grpc_slice_hash_table *table);
/** Returns the value from \a table associated with \a key.
Returns NULL if \a key is not found. */
-void* grpc_mdstr_hash_table_get(const grpc_mdstr_hash_table* table,
- const grpc_mdstr* key);
+void *grpc_slice_hash_table_get(const grpc_slice_hash_table *table,
+ const grpc_slice key);
-#endif /* GRPC_CORE_LIB_TRANSPORT_MDSTR_HASH_TABLE_H */
+#endif /* GRPC_CORE_LIB_SLICE_SLICE_HASH_TABLE_H */
diff --git a/src/core/lib/slice/slice_intern.c b/src/core/lib/slice/slice_intern.c
new file mode 100644
index 0000000000..7cbd17bffd
--- /dev/null
+++ b/src/core/lib/slice/slice_intern.c
@@ -0,0 +1,344 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/slice/slice_internal.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/iomgr/iomgr_internal.h" /* for iomgr_abort_on_leaks() */
+#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
+#include "src/core/lib/support/murmur_hash.h"
+#include "src/core/lib/transport/static_metadata.h"
+
+#define LOG2_SHARD_COUNT 5
+#define SHARD_COUNT (1 << LOG2_SHARD_COUNT)
+#define INITIAL_SHARD_CAPACITY 8
+
+#define TABLE_IDX(hash, capacity) (((hash) >> LOG2_SHARD_COUNT) % (capacity))
+#define SHARD_IDX(hash) ((hash) & ((1 << LOG2_SHARD_COUNT) - 1))
+
+typedef struct interned_slice_refcount {
+ grpc_slice_refcount base;
+ grpc_slice_refcount sub;
+ size_t length;
+ gpr_atm refcnt;
+ uint32_t hash;
+ struct interned_slice_refcount *bucket_next;
+} interned_slice_refcount;
+
+typedef struct slice_shard {
+ gpr_mu mu;
+ interned_slice_refcount **strs;
+ size_t count;
+ size_t capacity;
+} slice_shard;
+
+/* hash seed: decided at initialization time */
+static uint32_t g_hash_seed;
+static int g_forced_hash_seed = 0;
+
+static slice_shard g_shards[SHARD_COUNT];
+
+typedef struct {
+ uint32_t hash;
+ uint32_t idx;
+} static_metadata_hash_ent;
+
+static static_metadata_hash_ent
+ static_metadata_hash[4 * GRPC_STATIC_MDSTR_COUNT];
+static uint32_t max_static_metadata_hash_probe;
+static uint32_t static_metadata_hash_values[GRPC_STATIC_MDSTR_COUNT];
+
+static void interned_slice_ref(void *p) {
+ interned_slice_refcount *s = p;
+ GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&s->refcnt, 1) > 0);
+}
+
+static void interned_slice_destroy(interned_slice_refcount *s) {
+ slice_shard *shard = &g_shards[SHARD_IDX(s->hash)];
+ gpr_mu_lock(&shard->mu);
+ GPR_ASSERT(0 == gpr_atm_no_barrier_load(&s->refcnt));
+ interned_slice_refcount **prev_next;
+ interned_slice_refcount *cur;
+ for (prev_next = &shard->strs[TABLE_IDX(s->hash, shard->capacity)],
+ cur = *prev_next;
+ cur != s; prev_next = &cur->bucket_next, cur = cur->bucket_next)
+ ;
+ *prev_next = cur->bucket_next;
+ shard->count--;
+ gpr_free(s);
+ gpr_mu_unlock(&shard->mu);
+}
+
+static void interned_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
+ interned_slice_refcount *s = p;
+ if (1 == gpr_atm_full_fetch_add(&s->refcnt, -1)) {
+ interned_slice_destroy(s);
+ }
+}
+
+static void interned_slice_sub_ref(void *p) {
+ interned_slice_ref(((char *)p) - offsetof(interned_slice_refcount, sub));
+}
+
+static void interned_slice_sub_unref(grpc_exec_ctx *exec_ctx, void *p) {
+ interned_slice_unref(exec_ctx,
+ ((char *)p) - offsetof(interned_slice_refcount, sub));
+}
+
+static uint32_t interned_slice_hash(grpc_slice slice) {
+ interned_slice_refcount *s = (interned_slice_refcount *)slice.refcount;
+ return s->hash;
+}
+
+static int interned_slice_eq(grpc_slice a, grpc_slice b) {
+ return a.refcount == b.refcount;
+}
+
+static const grpc_slice_refcount_vtable interned_slice_vtable = {
+ interned_slice_ref, interned_slice_unref, interned_slice_eq,
+ interned_slice_hash};
+static const grpc_slice_refcount_vtable interned_slice_sub_vtable = {
+ interned_slice_sub_ref, interned_slice_sub_unref,
+ grpc_slice_default_eq_impl, grpc_slice_default_hash_impl};
+
+static void grow_shard(slice_shard *shard) {
+ size_t capacity = shard->capacity * 2;
+ size_t i;
+ interned_slice_refcount **strtab;
+ interned_slice_refcount *s, *next;
+
+ GPR_TIMER_BEGIN("grow_strtab", 0);
+
+ strtab = gpr_malloc(sizeof(interned_slice_refcount *) * capacity);
+ memset(strtab, 0, sizeof(interned_slice_refcount *) * capacity);
+
+ for (i = 0; i < shard->capacity; i++) {
+ for (s = shard->strs[i]; s; s = next) {
+ size_t idx = TABLE_IDX(s->hash, capacity);
+ next = s->bucket_next;
+ s->bucket_next = strtab[idx];
+ strtab[idx] = s;
+ }
+ }
+
+ gpr_free(shard->strs);
+ shard->strs = strtab;
+ shard->capacity = capacity;
+
+ GPR_TIMER_END("grow_strtab", 0);
+}
+
+static grpc_slice materialize(interned_slice_refcount *s) {
+ grpc_slice slice;
+ slice.refcount = &s->base;
+ slice.data.refcounted.bytes = (uint8_t *)(s + 1);
+ slice.data.refcounted.length = s->length;
+ return slice;
+}
+
+uint32_t grpc_slice_default_hash_impl(grpc_slice s) {
+ return gpr_murmur_hash3(GRPC_SLICE_START_PTR(s), GRPC_SLICE_LENGTH(s),
+ g_hash_seed);
+}
+
+uint32_t grpc_static_slice_hash(grpc_slice s) {
+ return static_metadata_hash_values[GRPC_STATIC_METADATA_INDEX(s)];
+}
+
+int grpc_static_slice_eq(grpc_slice a, grpc_slice b) {
+ return GRPC_STATIC_METADATA_INDEX(a) == GRPC_STATIC_METADATA_INDEX(b);
+}
+
+uint32_t grpc_slice_hash(grpc_slice s) {
+ return s.refcount == NULL ? grpc_slice_default_hash_impl(s)
+ : s.refcount->vtable->hash(s);
+}
+
+grpc_slice grpc_slice_maybe_static_intern(grpc_slice slice,
+ bool *returned_slice_is_different) {
+ if (GRPC_IS_STATIC_METADATA_STRING(slice)) {
+ return slice;
+ }
+
+ uint32_t hash = grpc_slice_hash(slice);
+ for (uint32_t i = 0; i <= max_static_metadata_hash_probe; i++) {
+ static_metadata_hash_ent ent =
+ static_metadata_hash[(hash + i) % GPR_ARRAY_SIZE(static_metadata_hash)];
+ if (ent.hash == hash && ent.idx < GRPC_STATIC_MDSTR_COUNT &&
+ grpc_slice_eq(grpc_static_slice_table[ent.idx], slice)) {
+ *returned_slice_is_different = true;
+ return grpc_static_slice_table[ent.idx];
+ }
+ }
+
+ return slice;
+}
+
+bool grpc_slice_is_interned(grpc_slice slice) {
+ return (slice.refcount && slice.refcount->vtable == &interned_slice_vtable) ||
+ GRPC_IS_STATIC_METADATA_STRING(slice);
+}
+
+grpc_slice grpc_slice_intern(grpc_slice slice) {
+ if (GRPC_IS_STATIC_METADATA_STRING(slice)) {
+ return slice;
+ }
+
+ uint32_t hash = grpc_slice_hash(slice);
+ for (uint32_t i = 0; i <= max_static_metadata_hash_probe; i++) {
+ static_metadata_hash_ent ent =
+ static_metadata_hash[(hash + i) % GPR_ARRAY_SIZE(static_metadata_hash)];
+ if (ent.hash == hash && ent.idx < GRPC_STATIC_MDSTR_COUNT &&
+ grpc_slice_eq(grpc_static_slice_table[ent.idx], slice)) {
+ return grpc_static_slice_table[ent.idx];
+ }
+ }
+
+ interned_slice_refcount *s;
+ slice_shard *shard = &g_shards[SHARD_IDX(hash)];
+
+ gpr_mu_lock(&shard->mu);
+
+ /* search for an existing string */
+ size_t idx = TABLE_IDX(hash, shard->capacity);
+ for (s = shard->strs[idx]; s; s = s->bucket_next) {
+ if (s->hash == hash && grpc_slice_eq(slice, materialize(s))) {
+ if (gpr_atm_no_barrier_fetch_add(&s->refcnt, 1) == 0) {
+ /* If we get here, we've added a ref to something that was about to
+ * die - drop it immediately.
+ * The *only* possible path here (given the shard mutex) should be to
+ * drop from one ref back to zero - assert that with a CAS */
+ GPR_ASSERT(gpr_atm_rel_cas(&s->refcnt, 1, 0));
+ /* and treat this as if we were never here... sshhh */
+ } else {
+ gpr_mu_unlock(&shard->mu);
+ GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
+ return materialize(s);
+ }
+ }
+ }
+
+ /* not found: create a new string */
+ /* string data goes after the internal_string header */
+ s = gpr_malloc(sizeof(*s) + GRPC_SLICE_LENGTH(slice));
+ gpr_atm_rel_store(&s->refcnt, 1);
+ s->length = GRPC_SLICE_LENGTH(slice);
+ s->hash = hash;
+ s->base.vtable = &interned_slice_vtable;
+ s->base.sub_refcount = &s->sub;
+ s->sub.vtable = &interned_slice_sub_vtable;
+ s->sub.sub_refcount = &s->sub;
+ s->bucket_next = shard->strs[idx];
+ shard->strs[idx] = s;
+ memcpy(s + 1, GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice));
+
+ shard->count++;
+
+ if (shard->count > shard->capacity * 2) {
+ grow_shard(shard);
+ }
+
+ gpr_mu_unlock(&shard->mu);
+
+ return materialize(s);
+}
+
+void grpc_test_only_set_slice_hash_seed(uint32_t seed) {
+ g_hash_seed = seed;
+ g_forced_hash_seed = 1;
+}
+
+void grpc_slice_intern_init(void) {
+ if (!g_forced_hash_seed) {
+ g_hash_seed = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec;
+ }
+ for (size_t i = 0; i < SHARD_COUNT; i++) {
+ slice_shard *shard = &g_shards[i];
+ gpr_mu_init(&shard->mu);
+ shard->count = 0;
+ shard->capacity = INITIAL_SHARD_CAPACITY;
+ shard->strs = gpr_malloc(sizeof(*shard->strs) * shard->capacity);
+ memset(shard->strs, 0, sizeof(*shard->strs) * shard->capacity);
+ }
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(static_metadata_hash); i++) {
+ static_metadata_hash[i].hash = 0;
+ static_metadata_hash[i].idx = GRPC_STATIC_MDSTR_COUNT;
+ }
+ max_static_metadata_hash_probe = 0;
+ for (size_t i = 0; i < GRPC_STATIC_MDSTR_COUNT; i++) {
+ static_metadata_hash_values[i] =
+ grpc_slice_default_hash_impl(grpc_static_slice_table[i]);
+ for (size_t j = 0; j < GPR_ARRAY_SIZE(static_metadata_hash); j++) {
+ size_t slot = (static_metadata_hash_values[i] + j) %
+ GPR_ARRAY_SIZE(static_metadata_hash);
+ if (static_metadata_hash[slot].idx == GRPC_STATIC_MDSTR_COUNT) {
+ static_metadata_hash[slot].hash = static_metadata_hash_values[i];
+ static_metadata_hash[slot].idx = (uint32_t)i;
+ if (j > max_static_metadata_hash_probe) {
+ max_static_metadata_hash_probe = (uint32_t)j;
+ }
+ break;
+ }
+ }
+ }
+}
+
+void grpc_slice_intern_shutdown(void) {
+ for (size_t i = 0; i < SHARD_COUNT; i++) {
+ slice_shard *shard = &g_shards[i];
+ gpr_mu_destroy(&shard->mu);
+ /* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
+ if (shard->count != 0) {
+ gpr_log(GPR_DEBUG, "WARNING: %" PRIuPTR " metadata strings were leaked",
+ shard->count);
+ for (size_t j = 0; j < shard->capacity; j++) {
+ for (interned_slice_refcount *s = shard->strs[j]; s;
+ s = s->bucket_next) {
+ char *text =
+ grpc_dump_slice(materialize(s), GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_DEBUG, "LEAKED: %s", text);
+ gpr_free(text);
+ }
+ }
+ if (grpc_iomgr_abort_on_leaks()) {
+ abort();
+ }
+ }
+ gpr_free(shard->strs);
+ }
+}
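A minimal usage sketch (not part of this diff); the key literal is arbitrary and assumed not to be one of the static metadata strings:

    grpc_slice raw = grpc_slice_from_copied_string("x-custom-trace-id");
    grpc_slice interned = grpc_slice_intern(raw); /* copied into a shard table */
    GPR_ASSERT(grpc_slice_is_interned(interned));
    GPR_ASSERT(grpc_slice_eq(raw, interned)); /* byte-wise equal */
    /* re-interning yields the same refcount, so equality between two interned
       slices reduces to the pointer comparison in interned_slice_eq */
    grpc_slice again = grpc_slice_intern(interned);
    GPR_ASSERT(again.refcount == interned.refcount);
    grpc_slice_unref(raw);
    grpc_slice_unref(interned);
    grpc_slice_unref(again);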
diff --git a/src/core/lib/slice/slice_internal.h b/src/core/lib/slice/slice_internal.h
index 6185333ca7..6467b0a8d6 100644
--- a/src/core/lib/slice/slice_internal.h
+++ b/src/core/lib/slice/slice_internal.h
@@ -46,4 +46,19 @@ void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx *exec_ctx,
void grpc_slice_buffer_destroy_internal(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer *sb);
+/* Check if a slice is interned */
+bool grpc_slice_is_interned(grpc_slice slice);
+
+void grpc_slice_intern_init(void);
+void grpc_slice_intern_shutdown(void);
+void grpc_test_only_set_slice_hash_seed(uint32_t key);
+// if slice matches a static slice, returns the static slice
+// otherwise returns the passed in slice (without reffing it)
+// used for surface boundaries where we might receive an un-interned static
+// string
+grpc_slice grpc_slice_maybe_static_intern(grpc_slice slice,
+ bool *returned_slice_is_different);
+uint32_t grpc_static_slice_hash(grpc_slice s);
+int grpc_static_slice_eq(grpc_slice a, grpc_slice b);
+
#endif /* GRPC_CORE_LIB_SLICE_SLICE_INTERNAL_H */
diff --git a/src/core/lib/slice/slice_string_helpers.c b/src/core/lib/slice/slice_string_helpers.c
index 839c366b32..99695007cc 100644
--- a/src/core/lib/slice/slice_string_helpers.c
+++ b/src/core/lib/slice/slice_string_helpers.c
@@ -88,3 +88,8 @@ void grpc_slice_split(grpc_slice str, const char *sep, grpc_slice_buffer *dst) {
grpc_slice_buffer_add_indexed(dst, grpc_slice_ref_internal(str));
}
}
+
+bool grpc_parse_slice_to_uint32(grpc_slice str, uint32_t *result) {
+ return gpr_parse_bytes_to_uint32((const char *)GRPC_SLICE_START_PTR(str),
+ GRPC_SLICE_LENGTH(str), result) != 0;
+}
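A minimal usage sketch (not part of this diff):

    uint32_t timeout_millis;
    grpc_slice value = grpc_slice_from_static_string("20000");
    if (grpc_parse_slice_to_uint32(value, &timeout_millis)) {
      /* timeout_millis == 20000 */
    } else {
      /* the slice did not hold a base-10 uint32 */
    }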
diff --git a/src/core/lib/slice/slice_string_helpers.h b/src/core/lib/slice/slice_string_helpers.h
index 151c720777..4a4deec6e5 100644
--- a/src/core/lib/slice/slice_string_helpers.h
+++ b/src/core/lib/slice/slice_string_helpers.h
@@ -34,12 +34,15 @@
#ifndef GRPC_CORE_LIB_SLICE_SLICE_STRING_HELPERS_H
#define GRPC_CORE_LIB_SLICE_SLICE_STRING_HELPERS_H
+#include <stdbool.h>
#include <stddef.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/support/port_platform.h>
+#include "src/core/lib/support/string.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -51,6 +54,8 @@ char *grpc_dump_slice(grpc_slice slice, uint32_t flags);
* should be a properly initialized instance. */
void grpc_slice_split(grpc_slice str, const char *sep, grpc_slice_buffer *dst);
+bool grpc_parse_slice_to_uint32(grpc_slice str, uint32_t *result);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/core/lib/slice/slice_traits.h b/src/core/lib/slice/slice_traits.h
new file mode 100644
index 0000000000..8a283dc65c
--- /dev/null
+++ b/src/core/lib/slice/slice_traits.h
@@ -0,0 +1,44 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SLICE_SLICE_TRAITS_H
+#define GRPC_CORE_LIB_SLICE_SLICE_TRAITS_H
+
+#include <grpc/slice.h>
+#include <stdbool.h>
+
+bool grpc_slice_is_legal_header(grpc_slice s);
+bool grpc_slice_is_legal_nonbin_header(grpc_slice s);
+bool grpc_slice_is_bin_suffixed(grpc_slice s);
+
+#endif /* GRPC_CORE_LIB_SLICE_SLICE_TRAITS_H */
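A sketch of how a caller could combine these predicates; their semantics are inferred from the names (the implementations live elsewhere in this change), and the helper mirrors the key/"-bin"-suffix pattern that prepare_application_metadata in call.c applies via the validate_metadata helpers below.

#include <stdbool.h>
#include "src/core/lib/slice/slice_traits.h"

static bool header_pair_ok(grpc_slice key, grpc_slice value) {
  if (!grpc_slice_is_legal_header(key)) return false;
  /* binary ("-bin" suffixed) headers may carry arbitrary bytes */
  if (grpc_slice_is_bin_suffixed(key)) return true;
  return grpc_slice_is_legal_nonbin_header(value);
}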
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 899e8fab3f..63b0683df5 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -56,13 +56,15 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/completion_queue.h"
+#include "src/core/lib/surface/validate_metadata.h"
+#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/transport.h"
/** The maximum number of concurrent batches possible.
Based upon the maximum number of individually queueable ops in the batch
- api:
+ api:
- initial metadata send
- message send
- status/close send (depending on client/server)
@@ -92,18 +94,21 @@ typedef enum {
} status_source;
typedef struct {
- uint8_t is_set;
- grpc_status_code code;
- grpc_mdstr *details;
+ bool is_set;
+ grpc_error *error;
} received_status;
+#define MAX_ERRORS_PER_BATCH 3
+
typedef struct batch_control {
grpc_call *call;
grpc_cq_completion cq_completion;
grpc_closure finish_batch;
void *notify_tag;
gpr_refcount steps_to_complete;
- grpc_error *error;
+
+ grpc_error *errors[MAX_ERRORS_PER_BATCH];
+ gpr_atm num_errors;
uint8_t send_initial_metadata;
uint8_t send_message;
@@ -185,6 +190,7 @@ struct grpc_call {
grpc_call *sibling_prev;
grpc_slice_buffer_stream sending_stream;
+
grpc_byte_stream *receiving_stream;
grpc_byte_buffer **receiving_buffer;
grpc_slice receiving_slice;
@@ -196,8 +202,7 @@ struct grpc_call {
union {
struct {
grpc_status_code *status;
- char **status_details;
- size_t *status_details_capacity;
+ grpc_slice *status_details;
} client;
struct {
int *cancelled;
@@ -219,13 +224,23 @@ static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
grpc_status_code status,
const char *description);
-static grpc_call_error close_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
- grpc_status_code status,
- const char *description);
+static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
+ grpc_error *error);
static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
grpc_error *error);
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error);
+static void get_final_status(grpc_call *call,
+ void (*set_value)(grpc_status_code code,
+ void *user_data),
+ void *set_value_user_data, grpc_slice *details);
+static void set_status_value_directly(grpc_status_code status, void *dest);
+static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ status_source source, grpc_error *error);
+static void process_data_after_md(grpc_exec_ctx *exec_ctx, batch_control *bctl);
+static void post_batch_completion(grpc_exec_ctx *exec_ctx, batch_control *bctl);
+static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
+ grpc_error *error);
grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
const grpc_call_create_args *args,
@@ -246,14 +261,16 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
/* Always support no compression */
GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
call->is_client = args->server_transport_data == NULL;
- grpc_mdstr *path = NULL;
+ grpc_slice path = grpc_empty_slice();
if (call->is_client) {
GPR_ASSERT(args->add_initial_metadata_count <
MAX_SEND_EXTRA_METADATA_COUNT);
for (i = 0; i < args->add_initial_metadata_count; i++) {
call->send_extra_metadata[i].md = args->add_initial_metadata[i];
- if (args->add_initial_metadata[i]->key == GRPC_MDSTR_PATH) {
- path = GRPC_MDSTR_REF(args->add_initial_metadata[i]->value);
+ if (grpc_slice_eq(GRPC_MDKEY(args->add_initial_metadata[i]),
+ GRPC_MDSTR_PATH)) {
+ path = grpc_slice_ref_internal(
+ GRPC_MDVALUE(args->add_initial_metadata[i]));
}
}
call->send_extra_metadata_count = (int)args->add_initial_metadata_count;
@@ -320,10 +337,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
args->server_transport_data, path, call->start_time, send_deadline,
CALL_STACK_FROM_CALL(call));
if (error != GRPC_ERROR_NONE) {
- grpc_status_code status;
- const char *error_str;
- grpc_error_get_status(error, &status, &error_str);
- close_with_status(exec_ctx, call, status, error_str);
+ cancel_with_error(exec_ctx, call, GRPC_ERROR_REF(error));
}
if (args->cq != NULL) {
GPR_ASSERT(
@@ -342,7 +356,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent);
}
- if (path != NULL) GRPC_MDSTR_UNREF(exec_ctx, path);
+ grpc_slice_unref_internal(exec_ctx, path);
GPR_TIMER_END("grpc_call_create", 0);
return error;
@@ -377,24 +391,6 @@ void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) {
GRPC_CALL_STACK_UNREF(exec_ctx, CALL_STACK_FROM_CALL(c), REF_REASON);
}
-static void get_final_status(grpc_call *call,
- void (*set_value)(grpc_status_code code,
- void *user_data),
- void *set_value_user_data) {
- int i;
- for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- if (call->status[i].is_set) {
- set_value(call->status[i].code, set_value_user_data);
- return;
- }
- }
- if (call->is_client) {
- set_value(GRPC_STATUS_UNKNOWN, set_value_user_data);
- } else {
- set_value(GRPC_STATUS_OK, set_value_user_data);
- }
-}
-
static void set_status_value_directly(grpc_status_code status, void *dest);
static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
@@ -410,11 +406,6 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
grpc_byte_stream_destroy(exec_ctx, c->receiving_stream);
}
gpr_mu_destroy(&c->mu);
- for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- if (c->status[i].details) {
- GRPC_MDSTR_UNREF(exec_ctx, c->status[i].details);
- }
- }
for (ii = 0; ii < c->send_extra_metadata_count; ii++) {
GRPC_MDELEM_UNREF(exec_ctx, c->send_extra_metadata[ii].md);
}
@@ -428,42 +419,245 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
}
grpc_channel *channel = c->channel;
- get_final_status(call, set_status_value_directly,
- &c->final_info.final_status);
+ get_final_status(call, set_status_value_directly, &c->final_info.final_status,
+ NULL);
c->final_info.stats.latency =
gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
+ for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
+ GRPC_ERROR_UNREF(c->status[i].error);
+ }
+
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, c);
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");
GPR_TIMER_END("destroy_call", 0);
}
-static void set_status_code(grpc_call *call, status_source source,
- uint32_t status) {
- if (call->status[source].is_set) return;
+void grpc_call_destroy(grpc_call *c) {
+ int cancel;
+ grpc_call *parent = c->parent;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ GPR_TIMER_BEGIN("grpc_call_destroy", 0);
+ GRPC_API_TRACE("grpc_call_destroy(c=%p)", 1, (c));
+
+ if (parent) {
+ gpr_mu_lock(&parent->mu);
+ if (c == parent->first_child) {
+ parent->first_child = c->sibling_next;
+ if (c == parent->first_child) {
+ parent->first_child = NULL;
+ }
+ c->sibling_prev->sibling_next = c->sibling_next;
+ c->sibling_next->sibling_prev = c->sibling_prev;
+ }
+ gpr_mu_unlock(&parent->mu);
+ GRPC_CALL_INTERNAL_UNREF(&exec_ctx, parent, "child");
+ }
+
+ gpr_mu_lock(&c->mu);
+ GPR_ASSERT(!c->destroy_called);
+ c->destroy_called = 1;
+ cancel = !c->received_final_op;
+ gpr_mu_unlock(&c->mu);
+ if (cancel) grpc_call_cancel(c, NULL);
+ GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
+ grpc_exec_ctx_finish(&exec_ctx);
+ GPR_TIMER_END("grpc_call_destroy", 0);
+}
+
+grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
+ GRPC_API_TRACE("grpc_call_cancel(call=%p, reserved=%p)", 2, (call, reserved));
+ GPR_ASSERT(!reserved);
+ return grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED, "Cancelled",
+ NULL);
+}
+
+static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op *op) {
+ grpc_call_element *elem;
+
+ GPR_TIMER_BEGIN("execute_op", 0);
+ elem = CALL_ELEM_FROM_CALL(call, 0);
+ op->context = call->context;
+ elem->filter->start_transport_stream_op(exec_ctx, elem, op);
+ GPR_TIMER_END("execute_op", 0);
+}
+
+char *grpc_call_get_peer(grpc_call *call) {
+ grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ char *result;
+ GRPC_API_TRACE("grpc_call_get_peer(%p)", 1, (call));
+ result = elem->filter->get_peer(&exec_ctx, elem);
+ if (result == NULL) {
+ result = grpc_channel_get_target(call->channel);
+ }
+ if (result == NULL) {
+ result = gpr_strdup("unknown");
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+ return result;
+}
+
+grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
+ return CALL_FROM_TOP_ELEM(elem);
+}
+
+/*******************************************************************************
+ * CANCELLATION
+ */
+
+grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
+ grpc_status_code status,
+ const char *description,
+ void *reserved) {
+ grpc_call_error r;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_API_TRACE(
+ "grpc_call_cancel_with_status("
+ "c=%p, status=%d, description=%s, reserved=%p)",
+ 4, (c, (int)status, description, reserved));
+ GPR_ASSERT(reserved == NULL);
+ gpr_mu_lock(&c->mu);
+ r = cancel_with_status(&exec_ctx, c, status, description);
+ gpr_mu_unlock(&c->mu);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return r;
+}
+
+typedef struct termination_closure {
+ grpc_closure closure;
+ grpc_call *call;
+ grpc_error *error;
+ grpc_transport_stream_op op;
+} termination_closure;
+
+static void done_termination(grpc_exec_ctx *exec_ctx, void *tcp,
+ grpc_error *error) {
+ termination_closure *tc = tcp;
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, tc->call, "termination");
+ gpr_free(tc);
+}
+
+static void send_termination(grpc_exec_ctx *exec_ctx, void *tcp,
+ grpc_error *error) {
+ termination_closure *tc = tcp;
+ memset(&tc->op, 0, sizeof(tc->op));
+ tc->op.cancel_error = tc->error;
+ /* reuse closure to catch completion */
+ grpc_closure_init(&tc->closure, done_termination, tc,
+ grpc_schedule_on_exec_ctx);
+ tc->op.on_complete = &tc->closure;
+ execute_op(exec_ctx, tc->call, &tc->op);
+}
+
+static grpc_call_error terminate_with_status(grpc_exec_ctx *exec_ctx,
+ termination_closure *tc) {
+ set_status_from_error(exec_ctx, tc->call, STATUS_FROM_API_OVERRIDE,
+ GRPC_ERROR_REF(tc->error));
+ grpc_closure_init(&tc->closure, send_termination, tc,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_INTERNAL_REF(tc->call, "termination");
+ grpc_closure_sched(exec_ctx, &tc->closure, GRPC_ERROR_NONE);
+ return GRPC_CALL_OK;
+}
+
+static grpc_call_error terminate_with_error(grpc_exec_ctx *exec_ctx,
+ grpc_call *c, grpc_error *error) {
+ termination_closure *tc = gpr_malloc(sizeof(*tc));
+ memset(tc, 0, sizeof(*tc));
+ tc->call = c;
+ tc->error = error;
+ return terminate_with_status(exec_ctx, tc);
+}
- call->status[source].is_set = 1;
- call->status[source].code = (grpc_status_code)status;
+static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
+ grpc_error *error) {
+ terminate_with_error(exec_ctx, c, error);
}
-static void set_status_details(grpc_exec_ctx *exec_ctx, grpc_call *call,
- status_source source, grpc_mdstr *status) {
- if (call->status[source].details != NULL) {
- GRPC_MDSTR_UNREF(exec_ctx, status);
+static grpc_error *error_from_status(grpc_status_code status,
+ const char *description) {
+ return grpc_error_set_int(
+ grpc_error_set_str(GRPC_ERROR_CREATE(description),
+ GRPC_ERROR_STR_GRPC_MESSAGE, description),
+ GRPC_ERROR_INT_GRPC_STATUS, status);
+}
+
+static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
+ grpc_status_code status,
+ const char *description) {
+ return terminate_with_error(exec_ctx, c,
+ error_from_status(status, description));
+}
+
+/*******************************************************************************
+ * FINAL STATUS CODE MANIPULATION
+ */
+
+static void get_final_status_from(grpc_call *call, status_source from_source,
+ void (*set_value)(grpc_status_code code,
+ void *user_data),
+ void *set_value_user_data,
+ grpc_slice *details) {
+ grpc_status_code code;
+ const char *msg = NULL;
+ grpc_error_get_status(call->status[from_source].error, call->send_deadline,
+ &code, &msg, NULL);
+
+ set_value(code, set_value_user_data);
+ if (details != NULL) {
+ *details =
+ msg == NULL ? grpc_empty_slice() : grpc_slice_from_copied_string(msg);
+ }
+}
+
+static void get_final_status(grpc_call *call,
+ void (*set_value)(grpc_status_code code,
+ void *user_data),
+ void *set_value_user_data, grpc_slice *details) {
+ int i;
+ /* search for the best status we can present: ideally the error we use has a
+ clearly defined grpc-status, and we'll prefer that. */
+ for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
+ if (call->status[i].is_set &&
+ grpc_error_has_clear_grpc_status(call->status[i].error)) {
+ get_final_status_from(call, (status_source)i, set_value,
+ set_value_user_data, details);
+ return;
+ }
+ }
+ /* If no clearly defined status exists, search for 'anything' */
+ for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
+ if (call->status[i].is_set) {
+ get_final_status_from(call, (status_source)i, set_value,
+ set_value_user_data, details);
+ return;
+ }
+ }
+ /* If nothing exists, set some default */
+ if (call->is_client) {
+ set_value(GRPC_STATUS_UNKNOWN, set_value_user_data);
} else {
- call->status[source].details = status;
+ set_value(GRPC_STATUS_OK, set_value_user_data);
}
}
static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call,
status_source source, grpc_error *error) {
- grpc_status_code status;
- const char *msg;
- grpc_error_get_status(error, &status, &msg);
- set_status_code(call, source, (uint32_t)status);
- set_status_details(exec_ctx, call, source, grpc_mdstr_from_string(msg));
+ if (call->status[source].is_set) {
+ GRPC_ERROR_UNREF(error);
+ return;
+ }
+ call->status[source].is_set = true;
+ call->status[source].error = error;
}
+/*******************************************************************************
+ * COMPRESSION
+ */
+
static void set_incoming_compression_algorithm(
grpc_call *call, grpc_compression_algorithm algo) {
GPR_ASSERT(algo < GRPC_COMPRESS_ALGORITHMS_COUNT);
@@ -496,7 +690,7 @@ uint32_t grpc_call_test_only_get_message_flags(grpc_call *call) {
static void destroy_encodings_accepted_by_peer(void *p) { return; }
static void set_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
- grpc_call *call, grpc_mdelem *mdel) {
+ grpc_call *call, grpc_mdelem mdel) {
size_t i;
grpc_compression_algorithm algorithm;
grpc_slice_buffer accept_encoding_parts;
@@ -511,7 +705,7 @@ static void set_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
return;
}
- accept_encoding_slice = mdel->value->slice;
+ accept_encoding_slice = GRPC_MDVALUE(mdel);
grpc_slice_buffer_init(&accept_encoding_parts);
grpc_slice_split(accept_encoding_slice, ",", &accept_encoding_parts);
@@ -520,15 +714,13 @@ static void set_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
/* Always support no compression */
GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
for (i = 0; i < accept_encoding_parts.count; i++) {
- const grpc_slice *accept_encoding_entry_slice =
- &accept_encoding_parts.slices[i];
- if (grpc_compression_algorithm_parse(
- (const char *)GRPC_SLICE_START_PTR(*accept_encoding_entry_slice),
- GRPC_SLICE_LENGTH(*accept_encoding_entry_slice), &algorithm)) {
+ grpc_slice accept_encoding_entry_slice = accept_encoding_parts.slices[i];
+ if (grpc_compression_algorithm_parse(accept_encoding_entry_slice,
+ &algorithm)) {
GPR_BITSET(&call->encodings_accepted_by_peer, algorithm);
} else {
char *accept_encoding_entry_str =
- grpc_dump_slice(*accept_encoding_entry_slice, GPR_DUMP_ASCII);
+ grpc_slice_to_c_string(accept_encoding_entry_slice);
gpr_log(GPR_ERROR,
"Invalid entry in accept encoding metadata: '%s'. Ignoring.",
accept_encoding_entry_str);
@@ -551,36 +743,6 @@ uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call) {
return encodings_accepted_by_peer;
}
-static void get_final_details(grpc_call *call, char **out_details,
- size_t *out_details_capacity) {
- int i;
- for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- if (call->status[i].is_set) {
- if (call->status[i].details) {
- grpc_slice details = call->status[i].details->slice;
- size_t len = GRPC_SLICE_LENGTH(details);
- if (len + 1 > *out_details_capacity) {
- *out_details_capacity =
- GPR_MAX(len + 1, *out_details_capacity * 3 / 2);
- *out_details = gpr_realloc(*out_details, *out_details_capacity);
- }
- memcpy(*out_details, GRPC_SLICE_START_PTR(details), len);
- (*out_details)[len] = 0;
- } else {
- goto no_details;
- }
- return;
- }
- }
-
-no_details:
- if (0 == *out_details_capacity) {
- *out_details_capacity = 8;
- *out_details = gpr_malloc(*out_details_capacity);
- }
- **out_details = 0;
-}
-
static grpc_linked_mdelem *linked_from_md(grpc_metadata *md) {
return (grpc_linked_mdelem *)&md->internal_data;
}
@@ -607,24 +769,19 @@ static int prepare_application_metadata(
get_md_elem(metadata, additional_metadata, i, count);
grpc_linked_mdelem *l = (grpc_linked_mdelem *)&md->internal_data;
GPR_ASSERT(sizeof(grpc_linked_mdelem) == sizeof(md->internal_data));
- l->md = grpc_mdelem_from_string_and_buffer(
- exec_ctx, md->key, (const uint8_t *)md->value, md->value_length);
- if (!grpc_header_key_is_legal(grpc_mdstr_as_c_string(l->md->key),
- GRPC_MDSTR_LENGTH(l->md->key))) {
- gpr_log(GPR_ERROR, "attempt to send invalid metadata key: %s",
- grpc_mdstr_as_c_string(l->md->key));
+ if (!GRPC_LOG_IF_ERROR("validate_metadata",
+ grpc_validate_header_key_is_legal(md->key))) {
break;
- } else if (!grpc_is_binary_header(grpc_mdstr_as_c_string(l->md->key),
- GRPC_MDSTR_LENGTH(l->md->key)) &&
- !grpc_header_nonbin_value_is_legal(
- grpc_mdstr_as_c_string(l->md->value),
- GRPC_MDSTR_LENGTH(l->md->value))) {
- gpr_log(GPR_ERROR, "attempt to send invalid metadata value");
+ } else if (!grpc_is_binary_header(md->key) &&
+ !GRPC_LOG_IF_ERROR(
+ "validate_metadata",
+ grpc_validate_header_nonbin_value_is_legal(md->value))) {
break;
}
+ l->md = grpc_mdelem_from_grpc_metadata(exec_ctx, (grpc_metadata *)md);
}
if (i != total_count) {
- for (int j = 0; j <= i; j++) {
+ for (int j = 0; j < i; j++) {
const grpc_metadata *md =
get_md_elem(metadata, additional_metadata, j, count);
grpc_linked_mdelem *l = (grpc_linked_mdelem *)&md->internal_data;
@@ -636,278 +793,41 @@ static int prepare_application_metadata(
if (call->send_extra_metadata_count == 0) {
prepend_extra_metadata = 0;
} else {
- for (i = 1; i < call->send_extra_metadata_count; i++) {
- call->send_extra_metadata[i].prev = &call->send_extra_metadata[i - 1];
- }
- for (i = 0; i < call->send_extra_metadata_count - 1; i++) {
- call->send_extra_metadata[i].next = &call->send_extra_metadata[i + 1];
+ for (i = 0; i < call->send_extra_metadata_count; i++) {
+ GRPC_LOG_IF_ERROR("prepare_application_metadata",
+ grpc_metadata_batch_link_tail(
+ exec_ctx, batch, &call->send_extra_metadata[i]));
}
}
}
- for (i = 1; i < total_count; i++) {
- grpc_metadata *md = get_md_elem(metadata, additional_metadata, i, count);
- grpc_metadata *prev_md =
- get_md_elem(metadata, additional_metadata, i - 1, count);
- linked_from_md(md)->prev = linked_from_md(prev_md);
- }
- for (i = 0; i < total_count - 1; i++) {
+ for (i = 0; i < total_count; i++) {
grpc_metadata *md = get_md_elem(metadata, additional_metadata, i, count);
- grpc_metadata *next_md =
- get_md_elem(metadata, additional_metadata, i + 1, count);
- linked_from_md(md)->next = linked_from_md(next_md);
- }
-
- switch (prepend_extra_metadata * 2 + (total_count != 0)) {
- case 0:
- /* no prepend, no metadata => nothing to do */
- batch->list.head = batch->list.tail = NULL;
- break;
- case 1: {
- /* metadata, but no prepend */
- grpc_metadata *first_md =
- get_md_elem(metadata, additional_metadata, 0, count);
- grpc_metadata *last_md =
- get_md_elem(metadata, additional_metadata, total_count - 1, count);
- batch->list.head = linked_from_md(first_md);
- batch->list.tail = linked_from_md(last_md);
- batch->list.head->prev = NULL;
- batch->list.tail->next = NULL;
- break;
- }
- case 2:
- /* prepend, but no md */
- batch->list.head = &call->send_extra_metadata[0];
- batch->list.tail =
- &call->send_extra_metadata[call->send_extra_metadata_count - 1];
- batch->list.head->prev = NULL;
- batch->list.tail->next = NULL;
- call->send_extra_metadata_count = 0;
- break;
- case 3: {
- /* prepend AND md */
- grpc_metadata *first_md =
- get_md_elem(metadata, additional_metadata, 0, count);
- grpc_metadata *last_md =
- get_md_elem(metadata, additional_metadata, total_count - 1, count);
- batch->list.head = &call->send_extra_metadata[0];
- call->send_extra_metadata[call->send_extra_metadata_count - 1].next =
- linked_from_md(first_md);
- linked_from_md(first_md)->prev =
- &call->send_extra_metadata[call->send_extra_metadata_count - 1];
- batch->list.tail = linked_from_md(last_md);
- batch->list.head->prev = NULL;
- batch->list.tail->next = NULL;
- call->send_extra_metadata_count = 0;
- break;
- }
- default:
- GPR_UNREACHABLE_CODE(return 0);
+ GRPC_LOG_IF_ERROR(
+ "prepare_application_metadata",
+ grpc_metadata_batch_link_tail(exec_ctx, batch, linked_from_md(md)));
}
+ call->send_extra_metadata_count = 0;
return 1;
}
-void grpc_call_destroy(grpc_call *c) {
- int cancel;
- grpc_call *parent = c->parent;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-
- GPR_TIMER_BEGIN("grpc_call_destroy", 0);
- GRPC_API_TRACE("grpc_call_destroy(c=%p)", 1, (c));
-
- if (parent) {
- gpr_mu_lock(&parent->mu);
- if (c == parent->first_child) {
- parent->first_child = c->sibling_next;
- if (c == parent->first_child) {
- parent->first_child = NULL;
- }
- c->sibling_prev->sibling_next = c->sibling_next;
- c->sibling_next->sibling_prev = c->sibling_prev;
- }
- gpr_mu_unlock(&parent->mu);
- GRPC_CALL_INTERNAL_UNREF(&exec_ctx, parent, "child");
- }
-
- gpr_mu_lock(&c->mu);
- GPR_ASSERT(!c->destroy_called);
- c->destroy_called = 1;
- cancel = !c->received_final_op;
- gpr_mu_unlock(&c->mu);
- if (cancel) grpc_call_cancel(c, NULL);
- GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
- grpc_exec_ctx_finish(&exec_ctx);
- GPR_TIMER_END("grpc_call_destroy", 0);
-}
-
-grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
- GRPC_API_TRACE("grpc_call_cancel(call=%p, reserved=%p)", 2, (call, reserved));
- GPR_ASSERT(!reserved);
- return grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED, "Cancelled",
- NULL);
-}
-
-grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
- grpc_status_code status,
- const char *description,
- void *reserved) {
- grpc_call_error r;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- GRPC_API_TRACE(
- "grpc_call_cancel_with_status("
- "c=%p, status=%d, description=%s, reserved=%p)",
- 4, (c, (int)status, description, reserved));
- GPR_ASSERT(reserved == NULL);
- gpr_mu_lock(&c->mu);
- r = cancel_with_status(&exec_ctx, c, status, description);
- gpr_mu_unlock(&c->mu);
- grpc_exec_ctx_finish(&exec_ctx);
- return r;
-}
-
-typedef struct termination_closure {
- grpc_closure closure;
- grpc_call *call;
- grpc_error *error;
- enum { TC_CANCEL, TC_CLOSE } type;
- grpc_transport_stream_op op;
-} termination_closure;
-
-static void done_termination(grpc_exec_ctx *exec_ctx, void *tcp,
- grpc_error *error) {
- termination_closure *tc = tcp;
- switch (tc->type) {
- case TC_CANCEL:
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, tc->call, "cancel");
- break;
- case TC_CLOSE:
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, tc->call, "close");
- break;
- }
- GRPC_ERROR_UNREF(tc->error);
- gpr_free(tc);
-}
-
-static void send_cancel(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) {
- termination_closure *tc = tcp;
- memset(&tc->op, 0, sizeof(tc->op));
- tc->op.cancel_error = tc->error;
- /* reuse closure to catch completion */
- grpc_closure_init(&tc->closure, done_termination, tc,
- grpc_schedule_on_exec_ctx);
- tc->op.on_complete = &tc->closure;
- execute_op(exec_ctx, tc->call, &tc->op);
-}
-
-static void send_close(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) {
- termination_closure *tc = tcp;
- memset(&tc->op, 0, sizeof(tc->op));
- tc->op.close_error = tc->error;
- /* reuse closure to catch completion */
- grpc_closure_init(&tc->closure, done_termination, tc,
- grpc_schedule_on_exec_ctx);
- tc->op.on_complete = &tc->closure;
- execute_op(exec_ctx, tc->call, &tc->op);
-}
-
-static grpc_call_error terminate_with_status(grpc_exec_ctx *exec_ctx,
- termination_closure *tc) {
- set_status_from_error(exec_ctx, tc->call, STATUS_FROM_API_OVERRIDE,
- tc->error);
-
- if (tc->type == TC_CANCEL) {
- grpc_closure_init(&tc->closure, send_cancel, tc, grpc_schedule_on_exec_ctx);
- GRPC_CALL_INTERNAL_REF(tc->call, "cancel");
- } else if (tc->type == TC_CLOSE) {
- grpc_closure_init(&tc->closure, send_close, tc, grpc_schedule_on_exec_ctx);
- GRPC_CALL_INTERNAL_REF(tc->call, "close");
- }
- grpc_closure_sched(exec_ctx, &tc->closure, GRPC_ERROR_NONE);
- return GRPC_CALL_OK;
-}
-
-static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
- grpc_status_code status,
- const char *description) {
- GPR_ASSERT(status != GRPC_STATUS_OK);
- termination_closure *tc = gpr_malloc(sizeof(*tc));
- memset(tc, 0, sizeof(termination_closure));
- tc->type = TC_CANCEL;
- tc->call = c;
- tc->error = grpc_error_set_int(
- grpc_error_set_str(GRPC_ERROR_CREATE(description),
- GRPC_ERROR_STR_GRPC_MESSAGE, description),
- GRPC_ERROR_INT_GRPC_STATUS, status);
-
- return terminate_with_status(exec_ctx, tc);
-}
-
-static grpc_call_error close_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
- grpc_status_code status,
- const char *description) {
- GPR_ASSERT(status != GRPC_STATUS_OK);
- termination_closure *tc = gpr_malloc(sizeof(*tc));
- memset(tc, 0, sizeof(termination_closure));
- tc->type = TC_CLOSE;
- tc->call = c;
- tc->error = grpc_error_set_int(
- grpc_error_set_str(GRPC_ERROR_CREATE(description),
- GRPC_ERROR_STR_GRPC_MESSAGE, description),
- GRPC_ERROR_INT_GRPC_STATUS, status);
-
- return terminate_with_status(exec_ctx, tc);
-}
-
-static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_transport_stream_op *op) {
- grpc_call_element *elem;
-
- GPR_TIMER_BEGIN("execute_op", 0);
- elem = CALL_ELEM_FROM_CALL(call, 0);
- op->context = call->context;
- elem->filter->start_transport_stream_op(exec_ctx, elem, op);
- GPR_TIMER_END("execute_op", 0);
-}
-
-char *grpc_call_get_peer(grpc_call *call) {
- grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- char *result;
- GRPC_API_TRACE("grpc_call_get_peer(%p)", 1, (call));
- result = elem->filter->get_peer(&exec_ctx, elem);
- if (result == NULL) {
- result = grpc_channel_get_target(call->channel);
- }
- if (result == NULL) {
- result = gpr_strdup("unknown");
- }
- grpc_exec_ctx_finish(&exec_ctx);
- return result;
-}
-
-grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
- return CALL_FROM_TOP_ELEM(elem);
-}
-
/* we offset status by a small amount when storing it into transport metadata
   as metadata cannot store a 0 value (which is used as OK for
   grpc_status_codes) */
#define STATUS_OFFSET 1
static void destroy_status(void *ignored) {}
-static uint32_t decode_status(grpc_mdelem *md) {
+static uint32_t decode_status(grpc_mdelem md) {
uint32_t status;
void *user_data;
- if (md == GRPC_MDELEM_GRPC_STATUS_0) return 0;
- if (md == GRPC_MDELEM_GRPC_STATUS_1) return 1;
- if (md == GRPC_MDELEM_GRPC_STATUS_2) return 2;
+ if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_0)) return 0;
+ if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_1)) return 1;
+ if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_2)) return 2;
user_data = grpc_mdelem_get_user_data(md, destroy_status);
if (user_data != NULL) {
status = ((uint32_t)(intptr_t)user_data) - STATUS_OFFSET;
} else {
- if (!gpr_parse_bytes_to_uint32(grpc_mdstr_as_c_string(md->value),
- GRPC_SLICE_LENGTH(md->value->slice),
- &status)) {
+ if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(md), &status)) {
status = GRPC_STATUS_UNKNOWN; /* could not parse status code */
}
grpc_mdelem_set_user_data(md, destroy_status,
@@ -916,93 +836,104 @@ static uint32_t decode_status(grpc_mdelem *md) {
return status;
}
-static grpc_compression_algorithm decode_compression(grpc_mdelem *md) {
+static grpc_compression_algorithm decode_compression(grpc_mdelem md) {
grpc_compression_algorithm algorithm =
- grpc_compression_algorithm_from_mdstr(md->value);
+ grpc_compression_algorithm_from_slice(GRPC_MDVALUE(md));
if (algorithm == GRPC_COMPRESS_ALGORITHMS_COUNT) {
- const char *md_c_str = grpc_mdstr_as_c_string(md->value);
+ char *md_c_str = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR,
"Invalid incoming compression algorithm: '%s'. Interpreting "
"incoming data as uncompressed.",
md_c_str);
+ gpr_free(md_c_str);
return GRPC_COMPRESS_NONE;
}
return algorithm;
}
-static grpc_mdelem *recv_common_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_mdelem *elem) {
- if (elem->key == GRPC_MDSTR_GRPC_STATUS) {
- GPR_TIMER_BEGIN("status", 0);
- set_status_code(call, STATUS_FROM_WIRE, decode_status(elem));
- GPR_TIMER_END("status", 0);
- return NULL;
- } else if (elem->key == GRPC_MDSTR_GRPC_MESSAGE) {
- GPR_TIMER_BEGIN("status-details", 0);
- set_status_details(exec_ctx, call, STATUS_FROM_WIRE,
- GRPC_MDSTR_REF(elem->value));
- GPR_TIMER_END("status-details", 0);
- return NULL;
- }
- return elem;
+static void recv_common_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_metadata_batch *b) {
+ if (b->idx.named.grpc_status != NULL) {
+ uint32_t status_code = decode_status(b->idx.named.grpc_status->md);
+ grpc_error *error =
+ status_code == GRPC_STATUS_OK
+ ? GRPC_ERROR_NONE
+ : grpc_error_set_int(GRPC_ERROR_CREATE("Error received from peer"),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ (intptr_t)status_code);
+
+ if (b->idx.named.grpc_message != NULL) {
+ char *msg =
+ grpc_slice_to_c_string(GRPC_MDVALUE(b->idx.named.grpc_message->md));
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE, msg);
+ gpr_free(msg);
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_message);
+ } else {
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE, "");
+ }
+
+ set_status_from_error(exec_ctx, call, STATUS_FROM_WIRE, error);
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_status);
+ }
}
-static grpc_mdelem *publish_app_metadata(grpc_call *call, grpc_mdelem *elem,
- int is_trailing) {
+static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
+ int is_trailing) {
+ if (b->list.count == 0) return;
+ GPR_TIMER_BEGIN("publish_app_metadata", 0);
grpc_metadata_array *dest;
grpc_metadata *mdusr;
- GPR_TIMER_BEGIN("publish_app_metadata", 0);
dest = call->buffered_metadata[is_trailing];
- if (dest->count == dest->capacity) {
- dest->capacity = GPR_MAX(dest->capacity + 8, dest->capacity * 2);
+ if (dest->count + b->list.count > dest->capacity) {
+ dest->capacity =
+ GPR_MAX(dest->capacity + b->list.count, dest->capacity * 3 / 2);
dest->metadata =
gpr_realloc(dest->metadata, sizeof(grpc_metadata) * dest->capacity);
}
- mdusr = &dest->metadata[dest->count++];
- mdusr->key = grpc_mdstr_as_c_string(elem->key);
- mdusr->value = grpc_mdstr_as_c_string(elem->value);
- mdusr->value_length = GRPC_SLICE_LENGTH(elem->value->slice);
+ for (grpc_linked_mdelem *l = b->list.head; l != NULL; l = l->next) {
+ mdusr = &dest->metadata[dest->count++];
+ /* we pass back borrowed slices that are valid whilst the call is valid */
+ mdusr->key = GRPC_MDKEY(l->md);
+ mdusr->value = GRPC_MDVALUE(l->md);
+ }
GPR_TIMER_END("publish_app_metadata", 0);
- return elem;
}
-static grpc_mdelem *recv_initial_filter(grpc_exec_ctx *exec_ctx, void *args,
- grpc_mdelem *elem) {
- grpc_call *call = args;
- elem = recv_common_filter(exec_ctx, call, elem);
- if (elem == NULL) {
- return NULL;
- } else if (elem->key == GRPC_MDSTR_GRPC_ENCODING) {
+static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_metadata_batch *b) {
+ recv_common_filter(exec_ctx, call, b);
+
+ if (b->idx.named.grpc_encoding != NULL) {
GPR_TIMER_BEGIN("incoming_compression_algorithm", 0);
- set_incoming_compression_algorithm(call, decode_compression(elem));
+ set_incoming_compression_algorithm(
+ call, decode_compression(b->idx.named.grpc_encoding->md));
GPR_TIMER_END("incoming_compression_algorithm", 0);
- return NULL;
- } else if (elem->key == GRPC_MDSTR_GRPC_ACCEPT_ENCODING) {
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_encoding);
+ }
+
+ if (b->idx.named.grpc_accept_encoding != NULL) {
GPR_TIMER_BEGIN("encodings_accepted_by_peer", 0);
- set_encodings_accepted_by_peer(exec_ctx, call, elem);
+ set_encodings_accepted_by_peer(exec_ctx, call,
+ b->idx.named.grpc_accept_encoding->md);
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_accept_encoding);
GPR_TIMER_END("encodings_accepted_by_peer", 0);
- return NULL;
- } else {
- return publish_app_metadata(call, elem, 0);
}
+
+ publish_app_metadata(call, b, false);
}
-static grpc_mdelem *recv_trailing_filter(grpc_exec_ctx *exec_ctx, void *args,
- grpc_mdelem *elem) {
+static void recv_trailing_filter(grpc_exec_ctx *exec_ctx, void *args,
+ grpc_metadata_batch *b) {
grpc_call *call = args;
- elem = recv_common_filter(exec_ctx, call, elem);
- if (elem == NULL) {
- return NULL;
- } else {
- return publish_app_metadata(call, elem, 1);
- }
+ recv_common_filter(exec_ctx, call, b);
+ publish_app_metadata(call, b, true);
}
grpc_call_stack *grpc_call_get_call_stack(grpc_call *call) {
return CALL_STACK_FROM_CALL(call);
}
-/*
+/*******************************************************************************
* BATCH API IMPLEMENTATION
*/
@@ -1053,14 +984,83 @@ static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
}
+static grpc_error *consolidate_batch_errors(batch_control *bctl) {
+ size_t n = (size_t)gpr_atm_no_barrier_load(&bctl->num_errors);
+ if (n == 0) {
+ return GRPC_ERROR_NONE;
+ } else if (n == 1) {
+ return bctl->errors[0];
+ } else {
+ grpc_error *error =
+ GRPC_ERROR_CREATE_REFERENCING("Call batch failed", bctl->errors, n);
+ for (size_t i = 0; i < n; i++) {
+ GRPC_ERROR_UNREF(bctl->errors[i]);
+ }
+ return error;
+ }
+}
+
static void post_batch_completion(grpc_exec_ctx *exec_ctx,
batch_control *bctl) {
+ grpc_call *child_call;
+ grpc_call *next_child_call;
grpc_call *call = bctl->call;
- grpc_error *error = bctl->error;
+ grpc_error *error = consolidate_batch_errors(bctl);
+
+ gpr_mu_lock(&call->mu);
+
+ if (error != GRPC_ERROR_NONE) {
+ set_status_from_error(exec_ctx, call, STATUS_FROM_CORE,
+ GRPC_ERROR_REF(error));
+ }
+
+ if (bctl->send_initial_metadata) {
+ grpc_metadata_batch_destroy(
+ exec_ctx,
+ &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]);
+ }
+ if (bctl->send_message) {
+ call->sending_message = false;
+ }
+ if (bctl->send_final_op) {
+ grpc_metadata_batch_destroy(
+ exec_ctx,
+ &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]);
+ }
if (bctl->recv_final_op) {
+ grpc_metadata_batch *md =
+ &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
+ recv_trailing_filter(exec_ctx, call, md);
+
+ call->received_final_op = true;
+ /* propagate cancellation to any interested children */
+ child_call = call->first_child;
+ if (child_call != NULL) {
+ do {
+ next_child_call = child_call->sibling_next;
+ if (child_call->cancellation_is_inherited) {
+ GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel");
+ grpc_call_cancel(child_call, NULL);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, child_call, "propagate_cancel");
+ }
+ child_call = next_child_call;
+ } while (child_call != call->first_child);
+ }
+
+ if (call->is_client) {
+ get_final_status(call, set_status_value_directly,
+ call->final_op.client.status,
+ call->final_op.client.status_details);
+ } else {
+ get_final_status(call, set_cancelled_value,
+ call->final_op.server.cancelled, NULL);
+ }
+
GRPC_ERROR_UNREF(error);
error = GRPC_ERROR_NONE;
}
+ gpr_mu_unlock(&call->mu);
+
if (bctl->is_notify_tag_closure) {
/* unrefs bctl->error */
grpc_closure_run(exec_ctx, bctl->notify_tag, error);
@@ -1077,6 +1077,12 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
}
}
+static void finish_batch_step(grpc_exec_ctx *exec_ctx, batch_control *bctl) {
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+}
+
static void continue_receiving_slices(grpc_exec_ctx *exec_ctx,
batch_control *bctl) {
grpc_call *call = bctl->call;
@@ -1087,9 +1093,7 @@ static void continue_receiving_slices(grpc_exec_ctx *exec_ctx,
call->receiving_message = 0;
grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
call->receiving_stream = NULL;
- if (gpr_unref(&bctl->steps_to_complete)) {
- post_batch_completion(exec_ctx, bctl);
- }
+ finish_batch_step(exec_ctx, bctl);
return;
}
if (grpc_byte_stream_next(exec_ctx, call->receiving_stream,
@@ -1120,9 +1124,7 @@ static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
call->receiving_stream = NULL;
grpc_byte_buffer_destroy(*call->receiving_buffer);
*call->receiving_buffer = NULL;
- if (gpr_unref(&bctl->steps_to_complete)) {
- post_batch_completion(exec_ctx, bctl);
- }
+ finish_batch_step(exec_ctx, bctl);
}
}
@@ -1132,9 +1134,7 @@ static void process_data_after_md(grpc_exec_ctx *exec_ctx,
if (call->receiving_stream == NULL) {
*call->receiving_buffer = NULL;
call->receiving_message = 0;
- if (gpr_unref(&bctl->steps_to_complete)) {
- post_batch_completion(exec_ctx, bctl);
- }
+ finish_batch_step(exec_ctx, bctl);
} else {
call->test_only_last_message_flags = call->receiving_stream->flags;
if ((call->receiving_stream->flags & GRPC_WRITE_INTERNAL_COMPRESS) &&
@@ -1154,14 +1154,11 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
batch_control *bctl = bctlp;
grpc_call *call = bctl->call;
+ gpr_mu_lock(&bctl->call->mu);
if (error != GRPC_ERROR_NONE) {
- grpc_status_code status;
- const char *msg;
- grpc_error_get_status(error, &status, &msg);
- close_with_status(exec_ctx, call, status, msg);
+ cancel_with_error(exec_ctx, call, GRPC_ERROR_REF(error));
}
- gpr_mu_lock(&bctl->call->mu);
- if (bctl->call->has_initial_md_been_received || error != GRPC_ERROR_NONE ||
+ if (call->has_initial_md_been_received || error != GRPC_ERROR_NONE ||
call->receiving_stream == NULL) {
gpr_mu_unlock(&bctl->call->mu);
process_data_after_md(exec_ctx, bctlp);
@@ -1186,7 +1183,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
gpr_asprintf(&error_msg, "Invalid compression algorithm value '%d'.",
algo);
gpr_log(GPR_ERROR, "%s", error_msg);
- close_with_status(exec_ctx, call, GRPC_STATUS_UNIMPLEMENTED, error_msg);
+ cancel_with_status(exec_ctx, call, GRPC_STATUS_UNIMPLEMENTED, error_msg);
} else if (grpc_compression_options_is_algorithm_enabled(
&compression_options, algo) == 0) {
/* check if algorithm is supported by current channel config */
@@ -1195,7 +1192,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
gpr_asprintf(&error_msg, "Compression algorithm '%s' is disabled.",
algo_name);
gpr_log(GPR_ERROR, "%s", error_msg);
- close_with_status(exec_ctx, call, GRPC_STATUS_UNIMPLEMENTED, error_msg);
+ cancel_with_status(exec_ctx, call, GRPC_STATUS_UNIMPLEMENTED, error_msg);
} else {
call->incoming_compression_algorithm = algo;
}
@@ -1221,12 +1218,12 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
}
}
-static void add_batch_error(batch_control *bctl, grpc_error *error) {
+static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
+ grpc_error *error) {
if (error == GRPC_ERROR_NONE) return;
- if (bctl->error == GRPC_ERROR_NONE) {
- bctl->error = GRPC_ERROR_CREATE("Call batch operation failed");
- }
- bctl->error = grpc_error_add_child(bctl->error, error);
+ cancel_with_error(exec_ctx, bctl->call, GRPC_ERROR_REF(error));
+ int idx = (int)gpr_atm_no_barrier_fetch_add(&bctl->num_errors, 1);
+ bctl->errors[idx] = error;
}
static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
@@ -1236,12 +1233,13 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&call->mu);
- add_batch_error(bctl, GRPC_ERROR_REF(error));
+ add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error));
if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch *md =
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
- grpc_metadata_batch_filter(exec_ctx, md, recv_initial_filter, call);
+ recv_initial_filter(exec_ctx, call, md);
+ /* TODO(ctiller): this could be moved into recv_initial_filter now */
GPR_TIMER_BEGIN("validate_filtered_metadata", 0);
validate_filtered_metadata(exec_ctx, bctl);
GPR_TIMER_END("validate_filtered_metadata", 0);
@@ -1265,85 +1263,15 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&call->mu);
- if (gpr_unref(&bctl->steps_to_complete)) {
- post_batch_completion(exec_ctx, bctl);
- }
+ finish_batch_step(exec_ctx, bctl);
}
static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
batch_control *bctl = bctlp;
- grpc_call *call = bctl->call;
- grpc_call *child_call;
- grpc_call *next_child_call;
-
- GRPC_ERROR_REF(error);
-
- gpr_mu_lock(&call->mu);
-
- // If the error has an associated status code, set the call's status.
- intptr_t status;
- if (error != GRPC_ERROR_NONE &&
- grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &status)) {
- set_status_from_error(exec_ctx, call, STATUS_FROM_CORE, error);
- }
-
- if (bctl->send_initial_metadata) {
- if (error != GRPC_ERROR_NONE) {
- set_status_from_error(exec_ctx, call, STATUS_FROM_CORE, error);
- }
- grpc_metadata_batch_destroy(
- exec_ctx,
- &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]);
- }
- if (bctl->send_message) {
- call->sending_message = 0;
- }
- if (bctl->send_final_op) {
- grpc_metadata_batch_destroy(
- exec_ctx,
- &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]);
- }
- if (bctl->recv_final_op) {
- grpc_metadata_batch *md =
- &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
- grpc_metadata_batch_filter(exec_ctx, md, recv_trailing_filter, call);
- call->received_final_op = true;
- /* propagate cancellation to any interested children */
- child_call = call->first_child;
- if (child_call != NULL) {
- do {
- next_child_call = child_call->sibling_next;
- if (child_call->cancellation_is_inherited) {
- GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel");
- grpc_call_cancel(child_call, NULL);
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, child_call, "propagate_cancel");
- }
- child_call = next_child_call;
- } while (child_call != call->first_child);
- }
-
- if (call->is_client) {
- get_final_status(call, set_status_value_directly,
- call->final_op.client.status);
- get_final_details(call, call->final_op.client.status_details,
- call->final_op.client.status_details_capacity);
- } else {
- get_final_status(call, set_cancelled_value,
- call->final_op.server.cancelled);
- }
-
- GRPC_ERROR_UNREF(error);
- error = GRPC_ERROR_NONE;
- }
- add_batch_error(bctl, GRPC_ERROR_REF(error));
- gpr_mu_unlock(&call->mu);
- if (gpr_unref(&bctl->steps_to_complete)) {
- post_batch_completion(exec_ctx, bctl);
- }
-
- GRPC_ERROR_UNREF(error);
+ add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error));
+ finish_batch_step(exec_ctx, bctl);
}
static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
@@ -1377,7 +1305,6 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
if (nops == 0) {
GRPC_CALL_INTERNAL_REF(call, "completion");
- bctl->error = GRPC_ERROR_NONE;
if (!is_notify_tag_closure) {
grpc_cq_begin_op(call->cq, notify_tag);
}
@@ -1426,13 +1353,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
const grpc_compression_algorithm calgo =
compression_algorithm_for_level_locked(
call, effective_compression_level);
- char *calgo_name = NULL;
- grpc_compression_algorithm_name(calgo, &calgo_name);
// the following will be picked up by the compress filter and used as
// the call's compression algorithm.
- compression_md.key = GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY;
- compression_md.value = calgo_name;
- compression_md.value_length = strlen(calgo_name);
+ compression_md.key = GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST;
+ compression_md.value = grpc_compression_algorithm_slice(calgo);
additional_metadata_count++;
}
@@ -1525,19 +1449,25 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call->send_extra_metadata_count = 1;
call->send_extra_metadata[0].md = grpc_channel_get_reffed_status_elem(
exec_ctx, call->channel, op->data.send_status_from_server.status);
- if (op->data.send_status_from_server.status_details != NULL) {
- call->send_extra_metadata[1].md = grpc_mdelem_from_metadata_strings(
- exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
- grpc_mdstr_from_string(
- op->data.send_status_from_server.status_details));
- call->send_extra_metadata_count++;
- set_status_details(
- exec_ctx, call, STATUS_FROM_API_OVERRIDE,
- GRPC_MDSTR_REF(call->send_extra_metadata[1].md->value));
- }
- if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
- set_status_code(call, STATUS_FROM_API_OVERRIDE,
- (uint32_t)op->data.send_status_from_server.status);
+ {
+ grpc_error *override_error = GRPC_ERROR_NONE;
+ if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
+ override_error = GRPC_ERROR_CREATE("Error from server send status");
+ }
+ if (op->data.send_status_from_server.status_details != NULL) {
+ call->send_extra_metadata[1].md = grpc_mdelem_from_slices(
+ exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
+ grpc_slice_ref_internal(
+ *op->data.send_status_from_server.status_details));
+ call->send_extra_metadata_count++;
+ char *msg = grpc_slice_to_c_string(
+ GRPC_MDVALUE(call->send_extra_metadata[1].md));
+ override_error = grpc_error_set_str(
+ override_error, GRPC_ERROR_STR_GRPC_MESSAGE, msg);
+ gpr_free(msg);
+ }
+ set_status_from_error(exec_ctx, call, STATUS_FROM_API_OVERRIDE,
+ override_error);
}
if (!prepare_application_metadata(
exec_ctx, call,
@@ -1615,8 +1545,6 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call->final_op.client.status = op->data.recv_status_on_client.status;
call->final_op.client.status_details =
op->data.recv_status_on_client.status_details;
- call->final_op.client.status_details_capacity =
- op->data.recv_status_on_client.status_details_capacity;
bctl->recv_final_op = 1;
stream_op->recv_trailing_metadata =
&call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
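The recv_status_on_client change above replaces the char**/capacity pair with a single grpc_slice*, so client code now receives an owned slice. A fragment sketching the adjusted application-side pattern (call, cq, tag, and batch plumbing are assumed):

grpc_metadata_array trailing_md;
grpc_metadata_array_init(&trailing_md);
grpc_slice details = grpc_empty_slice();
grpc_status_code status;
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op.data.recv_status_on_client.trailing_metadata = &trailing_md;
op.data.recv_status_on_client.status = &status;
op.data.recv_status_on_client.status_details = &details;
/* ... grpc_call_start_batch(call, &op, 1, tag, NULL), then wait for tag ... */
char *details_str = grpc_slice_to_c_string(details);
gpr_log(GPR_INFO, "status=%d details=%s", status, details_str);
gpr_free(details_str);
grpc_slice_unref(details);
grpc_metadata_array_destroy(&trailing_md);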
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index 233340c329..8c46a83d42 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -61,7 +61,7 @@ typedef struct grpc_call_create_args {
const void *server_transport_data;
- grpc_mdelem **add_initial_metadata;
+ grpc_mdelem *add_initial_metadata;
size_t add_initial_metadata_count;
gpr_timespec send_deadline;
diff --git a/src/core/lib/surface/call_details.c b/src/core/lib/surface/call_details.c
index fe73da3f55..d0f88e1969 100644
--- a/src/core/lib/surface/call_details.c
+++ b/src/core/lib/surface/call_details.c
@@ -36,15 +36,21 @@
#include <string.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/api_trace.h"
void grpc_call_details_init(grpc_call_details* cd) {
GRPC_API_TRACE("grpc_call_details_init(cd=%p)", 1, (cd));
memset(cd, 0, sizeof(*cd));
+ cd->method = grpc_empty_slice();
+ cd->host = grpc_empty_slice();
}
void grpc_call_details_destroy(grpc_call_details* cd) {
GRPC_API_TRACE("grpc_call_details_destroy(cd=%p)", 1, (cd));
- gpr_free(cd->method);
- gpr_free(cd->host);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_slice_unref_internal(&exec_ctx, cd->method);
+ grpc_slice_unref_internal(&exec_ctx, cd->host);
+ grpc_exec_ctx_finish(&exec_ctx);
}
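With grpc_call_details now carrying slices, a server that previously read cd.method as a char* converts explicitly and still destroys the struct afterwards. A sketch (server, call, request_md, cq, and tag are assumed to exist):

grpc_call_details cd;
grpc_call_details_init(&cd);
/* ... grpc_server_request_call(server, &call, &cd, &request_md, cq, cq, tag)
   completes and fills cd.method / cd.host as slices ... */
char *method = grpc_slice_to_c_string(cd.method);
gpr_log(GPR_INFO, "incoming call to %s", method);
gpr_free(method);
grpc_call_details_destroy(&cd);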
diff --git a/src/core/lib/surface/call_log_batch.c b/src/core/lib/surface/call_log_batch.c
index 31c074f15d..7fc58e19c2 100644
--- a/src/core/lib/surface/call_log_batch.c
+++ b/src/core/lib/surface/call_log_batch.c
@@ -35,17 +35,22 @@
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
static void add_metadata(gpr_strvec *b, const grpc_metadata *md, size_t count) {
size_t i;
+ if (md == NULL) {
+ gpr_strvec_add(b, gpr_strdup("(nil)"));
+ return;
+ }
for (i = 0; i < count; i++) {
gpr_strvec_add(b, gpr_strdup("\nkey="));
- gpr_strvec_add(b, gpr_strdup(md[i].key));
+ gpr_strvec_add(b, grpc_slice_to_c_string(md[i].key));
gpr_strvec_add(b, gpr_strdup(" value="));
- gpr_strvec_add(b, gpr_dump(md[i].value, md[i].value_length,
- GPR_DUMP_HEX | GPR_DUMP_ASCII));
+ gpr_strvec_add(b,
+ grpc_dump_slice(md[i].value, GPR_DUMP_HEX | GPR_DUMP_ASCII));
}
}
@@ -70,10 +75,16 @@ char *grpc_op_string(const grpc_op *op) {
gpr_strvec_add(&b, gpr_strdup("SEND_CLOSE_FROM_CLIENT"));
break;
case GRPC_OP_SEND_STATUS_FROM_SERVER:
- gpr_asprintf(&tmp, "SEND_STATUS_FROM_SERVER status=%d details=%s",
- op->data.send_status_from_server.status,
- op->data.send_status_from_server.status_details);
+ gpr_asprintf(&tmp, "SEND_STATUS_FROM_SERVER status=%d details=",
+ op->data.send_status_from_server.status);
gpr_strvec_add(&b, tmp);
+ if (op->data.send_status_from_server.status_details != NULL) {
+ gpr_strvec_add(&b, grpc_dump_slice(
+ *op->data.send_status_from_server.status_details,
+ GPR_DUMP_ASCII));
+ } else {
+ gpr_strvec_add(&b, gpr_strdup("(null)"));
+ }
add_metadata(&b, op->data.send_status_from_server.trailing_metadata,
op->data.send_status_from_server.trailing_metadata_count);
break;
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index b87295786e..429dbad7c7 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -43,6 +43,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
@@ -57,15 +58,15 @@
#define NUM_CACHED_STATUS_ELEMS 3
typedef struct registered_call {
- grpc_mdelem *path;
- grpc_mdelem *authority;
+ grpc_mdelem path;
+ grpc_mdelem authority;
struct registered_call *next;
} registered_call;
struct grpc_channel {
int is_client;
grpc_compression_options compression_options;
- grpc_mdelem *default_authority;
+ grpc_mdelem default_authority;
gpr_mu registered_call_mu;
registered_call *registered_calls;
@@ -102,9 +103,8 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL,
(void **)&channel);
if (error != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(error);
- gpr_log(GPR_ERROR, "channel stack builder failed: %s", msg);
- grpc_error_free_string(msg);
+ gpr_log(GPR_ERROR, "channel stack builder failed: %s",
+ grpc_error_string(error));
GRPC_ERROR_UNREF(error);
goto done;
}
@@ -116,19 +116,19 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
channel->registered_calls = NULL;
grpc_compression_options_init(&channel->compression_options);
-
for (size_t i = 0; i < args->num_args; i++) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) {
if (args->args[i].type != GRPC_ARG_STRING) {
gpr_log(GPR_ERROR, "%s ignored: it must be a string",
GRPC_ARG_DEFAULT_AUTHORITY);
} else {
- if (channel->default_authority) {
+ if (!GRPC_MDISNULL(channel->default_authority)) {
/* setting this takes precedence over anything else */
GRPC_MDELEM_UNREF(exec_ctx, channel->default_authority);
}
- channel->default_authority = grpc_mdelem_from_strings(
- exec_ctx, ":authority", args->args[i].value.string);
+ channel->default_authority = grpc_mdelem_from_slices(
+ exec_ctx, GRPC_MDSTR_AUTHORITY,
+ grpc_slice_from_copied_string(args->args[i].value.string));
}
} else if (0 ==
strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) {
@@ -136,14 +136,15 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
gpr_log(GPR_ERROR, "%s ignored: it must be a string",
GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
} else {
- if (channel->default_authority) {
+ if (!GRPC_MDISNULL(channel->default_authority)) {
/* other ways of setting this (notably ssl) take precedence */
gpr_log(GPR_ERROR,
"%s ignored: default host already set some other way",
GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
} else {
- channel->default_authority = grpc_mdelem_from_strings(
- exec_ctx, ":authority", args->args[i].value.string);
+ channel->default_authority = grpc_mdelem_from_slices(
+ exec_ctx, GRPC_MDSTR_AUTHORITY,
+ grpc_slice_from_copied_string(args->args[i].value.string));
}
}
} else if (0 == strcmp(args->args[i].key,
@@ -191,18 +192,18 @@ void grpc_channel_get_info(grpc_channel *channel,
static grpc_call *grpc_channel_create_call_internal(
grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call,
uint32_t propagation_mask, grpc_completion_queue *cq,
- grpc_pollset_set *pollset_set_alternative, grpc_mdelem *path_mdelem,
- grpc_mdelem *authority_mdelem, gpr_timespec deadline) {
- grpc_mdelem *send_metadata[2];
+ grpc_pollset_set *pollset_set_alternative, grpc_mdelem path_mdelem,
+ grpc_mdelem authority_mdelem, gpr_timespec deadline) {
+ grpc_mdelem send_metadata[2];
size_t num_metadata = 0;
GPR_ASSERT(channel->is_client);
GPR_ASSERT(!(cq != NULL && pollset_set_alternative != NULL));
send_metadata[num_metadata++] = path_mdelem;
- if (authority_mdelem != NULL) {
+ if (!GRPC_MDISNULL(authority_mdelem)) {
send_metadata[num_metadata++] = authority_mdelem;
- } else if (channel->default_authority != NULL) {
+ } else if (!GRPC_MDISNULL(channel->default_authority)) {
send_metadata[num_metadata++] = GRPC_MDELEM_REF(channel->default_authority);
}
@@ -227,27 +228,17 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
grpc_call *parent_call,
uint32_t propagation_mask,
grpc_completion_queue *cq,
- const char *method, const char *host,
+ grpc_slice method, const grpc_slice *host,
gpr_timespec deadline, void *reserved) {
- GRPC_API_TRACE(
- "grpc_channel_create_call("
- "channel=%p, parent_call=%p, propagation_mask=%x, cq=%p, method=%s, "
- "host=%s, "
- "deadline=gpr_timespec { tv_sec: %" PRId64
- ", tv_nsec: %d, clock_type: %d }, "
- "reserved=%p)",
- 10,
- (channel, parent_call, (unsigned)propagation_mask, cq, method, host,
- deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type, reserved));
GPR_ASSERT(!reserved);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call *call = grpc_channel_create_call_internal(
&exec_ctx, channel, parent_call, propagation_mask, cq, NULL,
- grpc_mdelem_from_metadata_strings(&exec_ctx, GRPC_MDSTR_PATH,
- grpc_mdstr_from_string(method)),
- host ? grpc_mdelem_from_metadata_strings(&exec_ctx, GRPC_MDSTR_AUTHORITY,
- grpc_mdstr_from_string(host))
- : NULL,
+ grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_PATH,
+ grpc_slice_ref_internal(method)),
+ host != NULL ? grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_AUTHORITY,
+ grpc_slice_ref_internal(*host))
+ : GRPC_MDNULL,
deadline);
grpc_exec_ctx_finish(&exec_ctx);
return call;
@@ -255,17 +246,16 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
grpc_call *grpc_channel_create_pollset_set_call(
grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call,
- uint32_t propagation_mask, grpc_pollset_set *pollset_set,
- const char *method, const char *host, gpr_timespec deadline,
- void *reserved) {
+ uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method,
+ const grpc_slice *host, gpr_timespec deadline, void *reserved) {
GPR_ASSERT(!reserved);
return grpc_channel_create_call_internal(
exec_ctx, channel, parent_call, propagation_mask, NULL, pollset_set,
- grpc_mdelem_from_metadata_strings(exec_ctx, GRPC_MDSTR_PATH,
- grpc_mdstr_from_string(method)),
- host ? grpc_mdelem_from_metadata_strings(exec_ctx, GRPC_MDSTR_AUTHORITY,
- grpc_mdstr_from_string(host))
- : NULL,
+ grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_PATH,
+ grpc_slice_ref_internal(method)),
+ host != NULL ? grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_AUTHORITY,
+ grpc_slice_ref_internal(*host))
+ : GRPC_MDNULL,
deadline);
}
@@ -277,12 +267,15 @@ void *grpc_channel_register_call(grpc_channel *channel, const char *method,
4, (channel, method, host, reserved));
GPR_ASSERT(!reserved);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- rc->path = grpc_mdelem_from_metadata_strings(&exec_ctx, GRPC_MDSTR_PATH,
- grpc_mdstr_from_string(method));
+
+ rc->path = grpc_mdelem_from_slices(
+ &exec_ctx, GRPC_MDSTR_PATH,
+ grpc_slice_intern(grpc_slice_from_static_string(method)));
rc->authority =
- host ? grpc_mdelem_from_metadata_strings(&exec_ctx, GRPC_MDSTR_AUTHORITY,
- grpc_mdstr_from_string(host))
- : NULL;
+ host ? grpc_mdelem_from_slices(
+ &exec_ctx, GRPC_MDSTR_AUTHORITY,
+ grpc_slice_intern(grpc_slice_from_static_string(host)))
+ : GRPC_MDNULL;
gpr_mu_lock(&channel->registered_call_mu);
rc->next = channel->registered_calls;
channel->registered_calls = rc;
@@ -310,8 +303,7 @@ grpc_call *grpc_channel_create_registered_call(
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call *call = grpc_channel_create_call_internal(
&exec_ctx, channel, parent_call, propagation_mask, completion_queue, NULL,
- GRPC_MDELEM_REF(rc->path),
- rc->authority ? GRPC_MDELEM_REF(rc->authority) : NULL, deadline);
+ GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority), deadline);
grpc_exec_ctx_finish(&exec_ctx);
return call;
}
@@ -340,14 +332,10 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
registered_call *rc = channel->registered_calls;
channel->registered_calls = rc->next;
GRPC_MDELEM_UNREF(exec_ctx, rc->path);
- if (rc->authority) {
- GRPC_MDELEM_UNREF(exec_ctx, rc->authority);
- }
+ GRPC_MDELEM_UNREF(exec_ctx, rc->authority);
gpr_free(rc);
}
- if (channel->default_authority != NULL) {
- GRPC_MDELEM_UNREF(exec_ctx, channel->default_authority);
- }
+ GRPC_MDELEM_UNREF(exec_ctx, channel->default_authority);
gpr_mu_destroy(&channel->registered_call_mu);
gpr_free(channel->target);
gpr_free(channel);
@@ -376,8 +364,8 @@ grpc_compression_options grpc_channel_compression_options(
return channel->compression_options;
}
-grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel *channel, int i) {
+grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel *channel, int i) {
char tmp[GPR_LTOA_MIN_BUFSIZE];
switch (i) {
case 0:
@@ -388,6 +376,6 @@ grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx,
return GRPC_MDELEM_GRPC_STATUS_2;
}
gpr_ltoa(i, tmp);
- return grpc_mdelem_from_metadata_strings(exec_ctx, GRPC_MDSTR_GRPC_STATUS,
- grpc_mdstr_from_string(tmp));
+ return grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_STATUS,
+ grpc_slice_from_copied_string(tmp));
}
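
A minimal caller-side sketch of the slice-based grpc_channel_create_call signature introduced above (the channel, cq, and method/host values are illustrative and not part of this change):

    grpc_slice method = grpc_slice_from_static_string("/my.pkg.EchoService/Echo");
    grpc_slice host = grpc_slice_from_static_string("my-host.example.com");
    grpc_call *call = grpc_channel_create_call(
        channel, /*parent_call=*/NULL, GRPC_PROPAGATE_DEFAULTS, cq, method,
        &host, gpr_inf_future(GPR_CLOCK_REALTIME), /*reserved=*/NULL);
    /* the call takes its own references internally; the caller drops its slices */
    grpc_slice_unref(method);
    grpc_slice_unref(host);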
diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h
index 2ebadb7a15..3a441d7add 100644
--- a/src/core/lib/surface/channel.h
+++ b/src/core/lib/surface/channel.h
@@ -52,9 +52,8 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
value of \a propagation_mask (see propagation_bits.h for possible values) */
grpc_call *grpc_channel_create_pollset_set_call(
grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call,
- uint32_t propagation_mask, grpc_pollset_set *pollset_set,
- const char *method, const char *host, gpr_timespec deadline,
- void *reserved);
+ uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method,
+ const grpc_slice *host, gpr_timespec deadline, void *reserved);
 /** Get a (borrowed) pointer to this channel's underlying channel stack */
grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel);
@@ -63,9 +62,9 @@ grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel);
status_code.
The returned elem is owned by the caller. */
-grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel *channel,
- int status_code);
+grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel *channel,
+ int status_code);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_channel_internal_ref(grpc_channel *channel, const char *reason);
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index 4613c9021e..1830842d00 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -253,7 +253,6 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
if (grpc_trace_operation_failures && error != GRPC_ERROR_NONE) {
gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
}
- grpc_error_free_string(errmsg);
}
storage->tag = tag;
@@ -294,7 +293,7 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
if (kick_error != GRPC_ERROR_NONE) {
const char *msg = grpc_error_string(kick_error);
gpr_log(GPR_ERROR, "Kick failed: %s", msg);
- grpc_error_free_string(msg);
+
GRPC_ERROR_UNREF(kick_error);
}
} else {
@@ -403,8 +402,8 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
.stolen_completion = NULL,
.tag = NULL,
.first_loop = true};
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(
- cq_is_next_finished, &is_finished_arg);
+ grpc_exec_ctx exec_ctx =
+ GRPC_EXEC_CTX_INITIALIZER(0, cq_is_next_finished, &is_finished_arg);
for (;;) {
if (is_finished_arg.stolen_completion != NULL) {
gpr_mu_unlock(cc->mu);
@@ -461,7 +460,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_mu_unlock(cc->mu);
const char *msg = grpc_error_string(err);
gpr_log(GPR_ERROR, "Completion queue next failed: %s", msg);
- grpc_error_free_string(msg);
+
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
@@ -572,8 +571,8 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
.stolen_completion = NULL,
.tag = tag,
.first_loop = true};
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(
- cq_is_pluck_finished, &is_finished_arg);
+ grpc_exec_ctx exec_ctx =
+ GRPC_EXEC_CTX_INITIALIZER(0, cq_is_pluck_finished, &is_finished_arg);
for (;;) {
if (is_finished_arg.stolen_completion != NULL) {
gpr_mu_unlock(cc->mu);
@@ -647,7 +646,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
gpr_mu_unlock(cc->mu);
const char *msg = grpc_error_string(err);
gpr_log(GPR_ERROR, "Completion queue next failed: %s", msg);
- grpc_error_free_string(msg);
+
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
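
With grpc_error_free_string dropped from these call sites, the string returned by grpc_error_string is logged directly; a minimal sketch of the resulting idiom (err is a hypothetical grpc_error*):

    const char *msg = grpc_error_string(err);
    gpr_log(GPR_ERROR, "operation failed: %s", msg);
    GRPC_ERROR_UNREF(err); /* only the error itself still needs to be released */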
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index f61bf1582e..cfa1882775 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -55,6 +55,7 @@
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel_init.h"
@@ -178,6 +179,7 @@ void grpc_init(void) {
gpr_mu_lock(&g_init_mu);
if (++g_initializations == 1) {
gpr_time_init();
+ grpc_slice_intern_init();
grpc_mdctx_global_init();
grpc_channel_init_init();
grpc_register_tracer("api", &grpc_api_trace);
@@ -242,6 +244,7 @@ void grpc_shutdown(void) {
}
grpc_mdctx_global_shutdown(&exec_ctx);
grpc_handshaker_factory_registry_shutdown(&exec_ctx);
+ grpc_slice_intern_shutdown();
}
gpr_mu_unlock(&g_init_mu);
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c
index ae1eac09a9..48de0e1d5b 100644
--- a/src/core/lib/surface/lame_client.c
+++ b/src/core/lib/surface/lame_client.c
@@ -44,10 +44,12 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
+#include "src/core/lib/transport/static_metadata.h"
typedef struct {
grpc_linked_mdelem status;
grpc_linked_mdelem details;
+ gpr_atm filled_metadata;
} call_data;
typedef struct {
@@ -58,17 +60,23 @@ typedef struct {
static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *mdb) {
call_data *calld = elem->call_data;
+ if (!gpr_atm_no_barrier_cas(&calld->filled_metadata, 0, 1)) {
+ return;
+ }
channel_data *chand = elem->channel_data;
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(chand->error_code, tmp);
- calld->status.md = grpc_mdelem_from_strings(exec_ctx, "grpc-status", tmp);
- calld->details.md =
- grpc_mdelem_from_strings(exec_ctx, "grpc-message", chand->error_message);
+ calld->status.md = grpc_mdelem_from_slices(
+ exec_ctx, GRPC_MDSTR_GRPC_STATUS, grpc_slice_from_copied_string(tmp));
+ calld->details.md = grpc_mdelem_from_slices(
+ exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
+ grpc_slice_from_copied_string(chand->error_message));
calld->status.prev = calld->details.next = NULL;
calld->status.next = &calld->details;
calld->details.prev = &calld->status;
mdb->list.head = &calld->status;
mdb->list.tail = &calld->details;
+ mdb->list.count = 2;
mdb->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
@@ -115,6 +123,8 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
+ call_data *calld = elem->call_data;
+ gpr_atm_no_barrier_store(&calld->filled_metadata, 0);
return GRPC_ERROR_NONE;
}
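
The filled_metadata flag above is a simple compare-and-swap once-guard; a standalone sketch of that pattern using the same gpr_atm primitive (names are hypothetical):

    #include <grpc/support/atm.h>

    static gpr_atm g_done; /* zero-initialized */

    static void run_once(void) {
      if (!gpr_atm_no_barrier_cas(&g_done, 0, 1)) {
        return; /* another caller already claimed the 0 -> 1 transition */
      }
      /* ... one-time work goes here ... */
    }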
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index addb7c4fbc..3782b49122 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -98,8 +98,9 @@ typedef struct requested_call {
typedef struct channel_registered_method {
registered_method *server_registered_method;
uint32_t flags;
- grpc_mdstr *method;
- grpc_mdstr *host;
+ bool has_host;
+ grpc_slice method;
+ grpc_slice host;
} channel_registered_method;
struct channel_data {
@@ -144,8 +145,10 @@ struct call_data {
/** the current state of a call - see call_state */
call_state state;
- grpc_mdstr *path;
- grpc_mdstr *host;
+ bool path_set;
+ bool host_set;
+ grpc_slice path;
+ grpc_slice host;
gpr_timespec deadline;
grpc_completion_queue *cq_new;
@@ -277,18 +280,20 @@ static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
}
static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
- int send_goaway, grpc_error *send_disconnect) {
+ bool send_goaway, grpc_error *send_disconnect) {
struct shutdown_cleanup_args *sc = gpr_malloc(sizeof(*sc));
grpc_closure_init(&sc->closure, shutdown_cleanup, sc,
grpc_schedule_on_exec_ctx);
grpc_transport_op *op = grpc_make_transport_op(&sc->closure);
grpc_channel_element *elem;
- op->send_goaway = send_goaway;
+ op->goaway_error =
+ send_goaway
+ ? grpc_error_set_int(GRPC_ERROR_CREATE("Server shutdown"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_OK)
+ : GRPC_ERROR_NONE;
op->set_accept_stream = true;
sc->slice = grpc_slice_from_copied_string("Server shutdown");
- op->goaway_message = &sc->slice;
- op->goaway_status = GRPC_STATUS_OK;
op->disconnect_with_error = send_disconnect;
elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
@@ -448,7 +453,6 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
if (grpc_server_channel_trace && error != GRPC_ERROR_NONE) {
const char *msg = grpc_error_string(error);
gpr_log(GPR_INFO, "Disconnected client: %s", msg);
- grpc_error_free_string(msg);
}
GRPC_ERROR_UNREF(error);
@@ -461,17 +465,6 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
op);
}
-static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
- grpc_slice slice = value->slice;
- size_t len = GRPC_SLICE_LENGTH(slice);
-
- if (len + 1 > *capacity) {
- *capacity = GPR_MAX(len + 1, *capacity * 2);
- *dest = gpr_realloc(*dest, *capacity);
- }
- memcpy(*dest, grpc_mdstr_as_c_string(value), len + 1);
-}
-
static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
grpc_cq_completion *c) {
requested_call *rc = req;
@@ -500,12 +493,10 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
GPR_SWAP(grpc_metadata_array, *rc->initial_metadata, calld->initial_metadata);
switch (rc->type) {
case BATCH_CALL:
- GPR_ASSERT(calld->host != NULL);
- GPR_ASSERT(calld->path != NULL);
- cpstr(&rc->data.batch.details->host,
- &rc->data.batch.details->host_capacity, calld->host);
- cpstr(&rc->data.batch.details->method,
- &rc->data.batch.details->method_capacity, calld->path);
+ GPR_ASSERT(calld->host_set);
+ GPR_ASSERT(calld->path_set);
+ rc->data.batch.details->host = grpc_slice_ref_internal(calld->host);
+ rc->data.batch.details->method = grpc_slice_ref_internal(calld->path);
rc->data.batch.details->deadline = calld->deadline;
rc->data.batch.details->flags =
(calld->recv_idempotent_request
@@ -627,35 +618,39 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
uint32_t hash;
channel_registered_method *rm;
- if (chand->registered_methods && calld->path && calld->host) {
+ if (chand->registered_methods && calld->path_set && calld->host_set) {
/* TODO(ctiller): unify these two searches */
/* check for an exact match with host */
- hash = GRPC_MDSTR_KV_HASH(calld->host->hash, calld->path->hash);
+ hash = GRPC_MDSTR_KV_HASH(grpc_slice_hash(calld->host),
+ grpc_slice_hash(calld->path));
for (i = 0; i <= chand->registered_method_max_probes; i++) {
rm = &chand->registered_methods[(hash + i) %
chand->registered_method_slots];
if (!rm) break;
- if (rm->host != calld->host) continue;
- if (rm->method != calld->path) continue;
+ if (!rm->has_host) continue;
+ if (!grpc_slice_eq(rm->host, calld->host)) continue;
+ if (!grpc_slice_eq(rm->method, calld->path)) continue;
if ((rm->flags & GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) &&
- !calld->recv_idempotent_request)
+ !calld->recv_idempotent_request) {
continue;
+ }
finish_start_new_rpc(exec_ctx, server, elem,
&rm->server_registered_method->request_matcher,
rm->server_registered_method->payload_handling);
return;
}
/* check for a wildcard method definition (no host set) */
- hash = GRPC_MDSTR_KV_HASH(0, calld->path->hash);
+ hash = GRPC_MDSTR_KV_HASH(0, grpc_slice_hash(calld->path));
for (i = 0; i <= chand->registered_method_max_probes; i++) {
rm = &chand->registered_methods[(hash + i) %
chand->registered_method_slots];
if (!rm) break;
- if (rm->host != NULL) continue;
- if (rm->method != calld->path) continue;
+ if (rm->has_host) continue;
+ if (!grpc_slice_eq(rm->method, calld->path)) continue;
if ((rm->flags & GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) &&
- !calld->recv_idempotent_request)
+ !calld->recv_idempotent_request) {
continue;
+ }
finish_start_new_rpc(exec_ctx, server, elem,
&rm->server_registered_method->request_matcher,
rm->server_registered_method->payload_handling);
@@ -744,43 +739,40 @@ static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
}
}
-static grpc_mdelem *server_filter(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_mdelem *md) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
- if (md->key == GRPC_MDSTR_PATH) {
- if (calld->path == NULL) {
- calld->path = GRPC_MDSTR_REF(md->value);
- }
- return NULL;
- } else if (md->key == GRPC_MDSTR_AUTHORITY) {
- if (calld->host == NULL) {
- calld->host = GRPC_MDSTR_REF(md->value);
- }
- return NULL;
- }
- return md;
-}
-
static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
grpc_error *error) {
grpc_call_element *elem = ptr;
call_data *calld = elem->call_data;
gpr_timespec op_deadline;
- GRPC_ERROR_REF(error);
- grpc_metadata_batch_filter(exec_ctx, calld->recv_initial_metadata,
- server_filter, elem);
+ if (error == GRPC_ERROR_NONE) {
+ GPR_ASSERT(calld->recv_initial_metadata->idx.named.path != NULL);
+ GPR_ASSERT(calld->recv_initial_metadata->idx.named.authority != NULL);
+ calld->path = grpc_slice_ref_internal(
+ GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.path->md));
+ calld->host = grpc_slice_ref_internal(
+ GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.authority->md));
+ calld->path_set = true;
+ calld->host_set = true;
+ grpc_metadata_batch_remove(exec_ctx, calld->recv_initial_metadata,
+ calld->recv_initial_metadata->idx.named.path);
+ grpc_metadata_batch_remove(
+ exec_ctx, calld->recv_initial_metadata,
+ calld->recv_initial_metadata->idx.named.authority);
+ } else {
+ GRPC_ERROR_REF(error);
+ }
op_deadline = calld->recv_initial_metadata->deadline;
if (0 != gpr_time_cmp(op_deadline, gpr_inf_future(op_deadline.clock_type))) {
calld->deadline = op_deadline;
}
- if (calld->host && calld->path) {
+ if (calld->host_set && calld->path_set) {
/* do nothing */
} else {
- GRPC_ERROR_UNREF(error);
+ grpc_error *src_error = error;
error =
GRPC_ERROR_CREATE_REFERENCING("Missing :authority or :path", &error, 1);
+ GRPC_ERROR_UNREF(src_error);
}
grpc_closure_run(exec_ctx, calld->on_done_recv_initial_metadata, error);
@@ -910,11 +902,11 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
GPR_ASSERT(calld->state != PENDING);
- if (calld->host) {
- GRPC_MDSTR_UNREF(exec_ctx, calld->host);
+ if (calld->host_set) {
+ grpc_slice_unref_internal(exec_ctx, calld->host);
}
- if (calld->path) {
- GRPC_MDSTR_UNREF(exec_ctx, calld->path);
+ if (calld->path_set) {
+ grpc_slice_unref_internal(exec_ctx, calld->path);
}
grpc_metadata_array_destroy(&calld->initial_metadata);
@@ -946,11 +938,9 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
channel_data *chand = elem->channel_data;
if (chand->registered_methods) {
for (i = 0; i < chand->registered_method_slots; i++) {
- if (chand->registered_methods[i].method) {
- GRPC_MDSTR_UNREF(exec_ctx, chand->registered_methods[i].method);
- }
- if (chand->registered_methods[i].host) {
- GRPC_MDSTR_UNREF(exec_ctx, chand->registered_methods[i].host);
+ grpc_slice_unref_internal(exec_ctx, chand->registered_methods[i].method);
+ if (chand->registered_methods[i].has_host) {
+ grpc_slice_unref_internal(exec_ctx, chand->registered_methods[i].host);
}
}
gpr_free(chand->registered_methods);
@@ -1148,8 +1138,6 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
channel_registered_method *crm;
grpc_channel *channel;
channel_data *chand;
- grpc_mdstr *host;
- grpc_mdstr *method;
uint32_t hash;
size_t slots;
uint32_t probes;
@@ -1188,9 +1176,18 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
chand->registered_methods = gpr_malloc(alloc);
memset(chand->registered_methods, 0, alloc);
for (rm = s->registered_methods; rm; rm = rm->next) {
- host = rm->host ? grpc_mdstr_from_string(rm->host) : NULL;
- method = grpc_mdstr_from_string(rm->method);
- hash = GRPC_MDSTR_KV_HASH(host ? host->hash : 0, method->hash);
+ grpc_slice host;
+ bool has_host;
+ grpc_slice method;
+ if (rm->host != NULL) {
+ host = grpc_slice_intern(grpc_slice_from_static_string(rm->host));
+ has_host = true;
+ } else {
+ has_host = false;
+ }
+ method = grpc_slice_intern(grpc_slice_from_static_string(rm->method));
+ hash = GRPC_MDSTR_KV_HASH(has_host ? grpc_slice_hash(host) : 0,
+ grpc_slice_hash(method));
for (probes = 0; chand->registered_methods[(hash + probes) % slots]
.server_registered_method != NULL;
probes++)
@@ -1199,6 +1196,7 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
crm = &chand->registered_methods[(hash + probes) % slots];
crm->server_registered_method = rm;
crm->flags = rm->flags;
+ crm->has_host = has_host;
crm->host = host;
crm->method = method;
}
diff --git a/src/core/lib/surface/validate_metadata.c b/src/core/lib/surface/validate_metadata.c
index f49dd8584b..7ec9137265 100644
--- a/src/core/lib/surface/validate_metadata.c
+++ b/src/core/lib/surface/validate_metadata.c
@@ -34,40 +34,71 @@
#include <stdlib.h>
#include <string.h>
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
#include <grpc/support/port_platform.h>
-static int conforms_to(const char *s, size_t len, const uint8_t *legal_bits) {
- const char *p = s;
- const char *e = s + len;
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
+
+static grpc_error *conforms_to(grpc_slice slice, const uint8_t *legal_bits,
+ const char *err_desc) {
+ const uint8_t *p = GRPC_SLICE_START_PTR(slice);
+ const uint8_t *e = GRPC_SLICE_END_PTR(slice);
for (; p != e; p++) {
- int idx = (uint8_t)*p;
+ int idx = *p;
int byte = idx / 8;
int bit = idx % 8;
- if ((legal_bits[byte] & (1 << bit)) == 0) return 0;
+ if ((legal_bits[byte] & (1 << bit)) == 0) {
+ char *dump = grpc_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ grpc_error *error = grpc_error_set_str(
+ grpc_error_set_int(GRPC_ERROR_CREATE(err_desc), GRPC_ERROR_INT_OFFSET,
+ p - GRPC_SLICE_START_PTR(slice)),
+ GRPC_ERROR_STR_RAW_BYTES, dump);
+ gpr_free(dump);
+ return error;
+ }
}
- return 1;
+ return GRPC_ERROR_NONE;
+}
+
+static int error2int(grpc_error *error) {
+ int r = (error == GRPC_ERROR_NONE);
+ GRPC_ERROR_UNREF(error);
+ return r;
}
-int grpc_header_key_is_legal(const char *key, size_t length) {
+grpc_error *grpc_validate_header_key_is_legal(grpc_slice slice) {
static const uint8_t legal_header_bits[256 / 8] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xff, 0x03, 0x00, 0x00, 0x00,
0x80, 0xfe, 0xff, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- if (length == 0 || key[0] == ':') {
- return 0;
+ if (GRPC_SLICE_LENGTH(slice) == 0) {
+ return GRPC_ERROR_CREATE("Metadata keys cannot be zero length");
+ }
+ if (GRPC_SLICE_START_PTR(slice)[0] == ':') {
+ return GRPC_ERROR_CREATE("Metadata keys cannot start with :");
}
- return conforms_to(key, length, legal_header_bits);
+ return conforms_to(slice, legal_header_bits, "Illegal header key");
}
-int grpc_header_nonbin_value_is_legal(const char *value, size_t length) {
+int grpc_header_key_is_legal(grpc_slice slice) {
+ return error2int(grpc_validate_header_key_is_legal(slice));
+}
+
+grpc_error *grpc_validate_header_nonbin_value_is_legal(grpc_slice slice) {
static const uint8_t legal_header_bits[256 / 8] = {
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- return conforms_to(value, length, legal_header_bits);
+ return conforms_to(slice, legal_header_bits, "Illegal header value");
+}
+
+int grpc_header_nonbin_value_is_legal(grpc_slice slice) {
+ return error2int(grpc_validate_header_nonbin_value_is_legal(slice));
}
-int grpc_is_binary_header(const char *key, size_t length) {
- if (length < 5) return 0;
- return 0 == memcmp(key + length - 4, "-bin", 4);
+int grpc_is_binary_header(grpc_slice slice) {
+ if (GRPC_SLICE_LENGTH(slice) < 5) return 0;
+ return 0 == memcmp(GRPC_SLICE_END_PTR(slice) - 4, "-bin", 4);
}
diff --git a/src/core/lib/surface/validate_metadata.h b/src/core/lib/surface/validate_metadata.h
new file mode 100644
index 0000000000..2b800d25a4
--- /dev/null
+++ b/src/core/lib/surface/validate_metadata.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SURFACE_VALIDATE_METADATA_H
+#define GRPC_CORE_LIB_SURFACE_VALIDATE_METADATA_H
+
+#include <grpc/slice.h>
+#include "src/core/lib/iomgr/error.h"
+
+grpc_error *grpc_validate_header_key_is_legal(grpc_slice slice);
+grpc_error *grpc_validate_header_nonbin_value_is_legal(grpc_slice slice);
+
+#endif /* GRPC_CORE_LIB_SURFACE_VALIDATE_METADATA_H */
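
A short usage sketch of the new slice-based validators declared here (the key literal is illustrative):

    grpc_slice key = grpc_slice_from_static_string("x-trace-id-bin");
    grpc_error *err = grpc_validate_header_key_is_legal(key);
    if (err != GRPC_ERROR_NONE) {
      /* the error carries GRPC_ERROR_INT_OFFSET and GRPC_ERROR_STR_RAW_BYTES */
      GRPC_ERROR_UNREF(err);
    }
    if (grpc_is_binary_header(key)) {
      /* "-bin" suffixed keys carry binary values */
    }
    grpc_slice_unref(key);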
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c
index c656d93740..8fc5bf3e9a 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.c
@@ -163,7 +163,6 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker,
tracker->name, grpc_connectivity_state_name(tracker->current_state),
grpc_connectivity_state_name(state), reason, error, error_string);
- grpc_error_free_string(error_string);
}
switch (state) {
case GRPC_CHANNEL_INIT:
diff --git a/src/core/lib/transport/error_utils.c b/src/core/lib/transport/error_utils.c
new file mode 100644
index 0000000000..da77828d9c
--- /dev/null
+++ b/src/core/lib/transport/error_utils.c
@@ -0,0 +1,124 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/transport/error_utils.h"
+
+#include "src/core/lib/iomgr/error_internal.h"
+#include "src/core/lib/transport/status_conversion.h"
+
+static grpc_error *recursively_find_error_with_field(grpc_error *error,
+ grpc_error_ints which) {
+ // If the error itself has a status code, return it.
+ if (grpc_error_get_int(error, which, NULL)) {
+ return error;
+ }
+ if (grpc_error_is_special(error)) return NULL;
+ // Otherwise, search through its children.
+ intptr_t key = 0;
+ while (true) {
+ grpc_error *child_error = gpr_avl_get(error->errs, (void *)key++);
+ if (child_error == NULL) break;
+ grpc_error *result = recursively_find_error_with_field(child_error, which);
+ if (result != NULL) return result;
+ }
+ return NULL;
+}
+
+void grpc_error_get_status(grpc_error *error, gpr_timespec deadline,
+ grpc_status_code *code, const char **msg,
+ grpc_http2_error_code *http_error) {
+ // Start with the parent error and recurse through the tree of children
+ // until we find the first one that has a status code.
+ grpc_error *found_error =
+ recursively_find_error_with_field(error, GRPC_ERROR_INT_GRPC_STATUS);
+ if (found_error == NULL) {
+  /// If no grpc-status exists, search the tree again to find an http2 error
+  /// code
+ found_error =
+ recursively_find_error_with_field(error, GRPC_ERROR_INT_HTTP2_ERROR);
+ }
+
+ // If we found an error with a status code above, use that; otherwise,
+ // fall back to using the parent error.
+ if (found_error == NULL) found_error = error;
+
+ grpc_status_code status = GRPC_STATUS_UNKNOWN;
+ intptr_t integer;
+ if (grpc_error_get_int(found_error, GRPC_ERROR_INT_GRPC_STATUS, &integer)) {
+ status = (grpc_status_code)integer;
+ } else if (grpc_error_get_int(found_error, GRPC_ERROR_INT_HTTP2_ERROR,
+ &integer)) {
+ status = grpc_http2_error_to_grpc_status((grpc_http2_error_code)integer,
+ deadline);
+ }
+ if (code != NULL) *code = status;
+
+ if (http_error != NULL) {
+ if (grpc_error_get_int(found_error, GRPC_ERROR_INT_HTTP2_ERROR, &integer)) {
+ *http_error = (grpc_http2_error_code)integer;
+ } else if (grpc_error_get_int(found_error, GRPC_ERROR_INT_GRPC_STATUS,
+ &integer)) {
+ *http_error = grpc_status_to_http2_error((grpc_status_code)integer);
+ } else {
+ *http_error = found_error == GRPC_ERROR_NONE ? GRPC_HTTP2_NO_ERROR
+ : GRPC_HTTP2_INTERNAL_ERROR;
+ }
+ }
+
+ // If the error has a status message, use it. Otherwise, fall back to
+ // the error description.
+ if (msg != NULL) {
+ *msg = grpc_error_get_str(found_error, GRPC_ERROR_STR_GRPC_MESSAGE);
+ if (*msg == NULL && error != GRPC_ERROR_NONE) {
+ *msg = grpc_error_get_str(found_error, GRPC_ERROR_STR_DESCRIPTION);
+ if (*msg == NULL) *msg = "unknown error"; // Just in case.
+ }
+ }
+
+ if (found_error == NULL) found_error = error;
+}
+
+bool grpc_error_has_clear_grpc_status(grpc_error *error) {
+ if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, NULL)) {
+ return true;
+ }
+ intptr_t key = 0;
+ while (true) {
+ grpc_error *child_error = gpr_avl_get(error->errs, (void *)key++);
+ if (child_error == NULL) break;
+ if (grpc_error_has_clear_grpc_status(child_error)) {
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/src/core/lib/transport/error_utils.h b/src/core/lib/transport/error_utils.h
new file mode 100644
index 0000000000..105338880a
--- /dev/null
+++ b/src/core/lib/transport/error_utils.h
@@ -0,0 +1,56 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_TRANSPORT_ERROR_UTILS_H
+#define GRPC_CORE_LIB_TRANSPORT_ERROR_UTILS_H
+
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/transport/http2_errors.h"
+
+/// A utility function to get the status code and message to be returned
+/// to the application. If not set in the top-level message, looks
+/// through child errors until it finds the first one with these attributes.
+/// All attributes are pulled from the same child error. If any of the
+/// attributes (code, msg, http_status) are unneeded, they can be passed as
+/// NULL.
+void grpc_error_get_status(grpc_error *error, gpr_timespec deadline,
+ grpc_status_code *code, const char **msg,
+ grpc_http2_error_code *http_status);
+
+/// A utility function to check whether there is a clear status code that
+/// doesn't need to be guessed in \a error. This means that \a error or some
+/// child has GRPC_ERROR_INT_GRPC_STATUS set, or that it is GRPC_ERROR_NONE or
+/// GRPC_ERROR_CANCELLED
+bool grpc_error_has_clear_grpc_status(grpc_error *error);
+
+#endif /* GRPC_CORE_LIB_TRANSPORT_ERROR_UTILS_H */
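
A minimal sketch of how a transport consumes grpc_error_get_status (error and deadline are assumed to come from a failed stream):

    grpc_status_code status;
    const char *msg;
    grpc_http2_error_code http_error;
    grpc_error_get_status(error, deadline, &status, &msg, &http_error);
    /* any out-parameter that is not needed may be passed as NULL instead */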
diff --git a/src/core/ext/transport/chttp2/transport/http2_errors.h b/src/core/lib/transport/http2_errors.h
index deab2b7e3e..330bc987f6 100644
--- a/src/core/ext/transport/chttp2/transport/http2_errors.h
+++ b/src/core/lib/transport/http2_errors.h
@@ -31,26 +31,26 @@
*
*/
-#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_ERRORS_H
-#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_ERRORS_H
+#ifndef GRPC_CORE_LIB_TRANSPORT_HTTP2_ERRORS_H
+#define GRPC_CORE_LIB_TRANSPORT_HTTP2_ERRORS_H
/* error codes for RST_STREAM from http2 draft 14 section 7 */
typedef enum {
- GRPC_CHTTP2_NO_ERROR = 0x0,
- GRPC_CHTTP2_PROTOCOL_ERROR = 0x1,
- GRPC_CHTTP2_INTERNAL_ERROR = 0x2,
- GRPC_CHTTP2_FLOW_CONTROL_ERROR = 0x3,
- GRPC_CHTTP2_SETTINGS_TIMEOUT = 0x4,
- GRPC_CHTTP2_STREAM_CLOSED = 0x5,
- GRPC_CHTTP2_FRAME_SIZE_ERROR = 0x6,
- GRPC_CHTTP2_REFUSED_STREAM = 0x7,
- GRPC_CHTTP2_CANCEL = 0x8,
- GRPC_CHTTP2_COMPRESSION_ERROR = 0x9,
- GRPC_CHTTP2_CONNECT_ERROR = 0xa,
- GRPC_CHTTP2_ENHANCE_YOUR_CALM = 0xb,
- GRPC_CHTTP2_INADEQUATE_SECURITY = 0xc,
+ GRPC_HTTP2_NO_ERROR = 0x0,
+ GRPC_HTTP2_PROTOCOL_ERROR = 0x1,
+ GRPC_HTTP2_INTERNAL_ERROR = 0x2,
+ GRPC_HTTP2_FLOW_CONTROL_ERROR = 0x3,
+ GRPC_HTTP2_SETTINGS_TIMEOUT = 0x4,
+ GRPC_HTTP2_STREAM_CLOSED = 0x5,
+ GRPC_HTTP2_FRAME_SIZE_ERROR = 0x6,
+ GRPC_HTTP2_REFUSED_STREAM = 0x7,
+ GRPC_HTTP2_CANCEL = 0x8,
+ GRPC_HTTP2_COMPRESSION_ERROR = 0x9,
+ GRPC_HTTP2_CONNECT_ERROR = 0xa,
+ GRPC_HTTP2_ENHANCE_YOUR_CALM = 0xb,
+ GRPC_HTTP2_INADEQUATE_SECURITY = 0xc,
/* force use of a default clause */
- GRPC_CHTTP2__ERROR_DO_NOT_USE = -1
-} grpc_chttp2_error_code;
+ GRPC_HTTP2__ERROR_DO_NOT_USE = -1
+} grpc_http2_error_code;
-#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_ERRORS_H */
+#endif /* GRPC_CORE_LIB_TRANSPORT_HTTP2_ERRORS_H */
diff --git a/src/core/lib/transport/metadata.c b/src/core/lib/transport/metadata.c
index 54c39df6a7..489c20cbc8 100644
--- a/src/core/lib/transport/metadata.c
+++ b/src/core/lib/transport/metadata.c
@@ -48,12 +48,11 @@
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/murmur_hash.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/static_metadata.h"
-grpc_slice (*grpc_chttp2_base64_encode_and_huffman_compress)(grpc_slice input);
-
/* There are two kinds of mdelem and mdstr instances.
* Static instances are declared in static_metadata.{h,c} and
* are initialized by grpc_mdctx_global_init().
@@ -63,9 +62,6 @@ grpc_slice (*grpc_chttp2_base64_encode_and_huffman_compress)(grpc_slice input);
* used to determine which kind of element a pointer refers to.
*/
-#define INITIAL_STRTAB_CAPACITY 4
-#define INITIAL_MDTAB_CAPACITY 4
-
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
#define DEBUG_ARGS , const char *file, int line
#define FWD_DEBUG_ARGS , file, line
@@ -76,37 +72,20 @@ grpc_slice (*grpc_chttp2_base64_encode_and_huffman_compress)(grpc_slice input);
#define REF_MD_LOCKED(shard, s) ref_md_locked((shard), (s))
#endif
-#define TABLE_IDX(hash, log2_shards, capacity) \
- (((hash) >> (log2_shards)) % (capacity))
-#define SHARD_IDX(hash, log2_shards) ((hash) & ((1 << (log2_shards)) - 1))
-
-typedef void (*destroy_user_data_func)(void *user_data);
-
-#define SIZE_IN_DECODER_TABLE_NOT_SET -1
-/* Shadow structure for grpc_mdstr for non-static values */
-typedef struct internal_string {
- /* must be byte compatible with grpc_mdstr */
- grpc_slice slice;
- uint32_t hash;
-
- /* private only data */
- gpr_atm refcnt;
-
- uint8_t has_base64_and_huffman_encoded;
- grpc_slice_refcount refcount;
+#define INITIAL_SHARD_CAPACITY 8
+#define LOG2_SHARD_COUNT 4
+#define SHARD_COUNT ((size_t)(1 << LOG2_SHARD_COUNT))
- grpc_slice base64_and_huffman;
+#define TABLE_IDX(hash, capacity) (((hash) >> (LOG2_SHARD_COUNT)) % (capacity))
+#define SHARD_IDX(hash) ((hash) & ((1 << (LOG2_SHARD_COUNT)) - 1))
- gpr_atm size_in_decoder_table;
-
- struct internal_string *bucket_next;
-} internal_string;
+typedef void (*destroy_user_data_func)(void *user_data);
-/* Shadow structure for grpc_mdelem for non-static elements */
-typedef struct internal_metadata {
- /* must be byte compatible with grpc_mdelem */
- internal_string *key;
- internal_string *value;
+/* Shadow structure for grpc_mdelem_data for interned elements */
+typedef struct interned_metadata {
+ /* must be byte compatible with grpc_mdelem_data */
+ grpc_slice key;
+ grpc_slice value;
/* private only data */
gpr_atm refcnt;
@@ -115,19 +94,22 @@ typedef struct internal_metadata {
gpr_atm destroy_user_data;
gpr_atm user_data;
- struct internal_metadata *bucket_next;
-} internal_metadata;
+ struct interned_metadata *bucket_next;
+} interned_metadata;
-typedef struct strtab_shard {
- gpr_mu mu;
- internal_string **strs;
- size_t count;
- size_t capacity;
-} strtab_shard;
+/* Shadow structure for grpc_mdelem_data for allocated elements */
+typedef struct allocated_metadata {
+ /* must be byte compatible with grpc_mdelem_data */
+ grpc_slice key;
+ grpc_slice value;
+
+ /* private only data */
+ gpr_atm refcnt;
+} allocated_metadata;
typedef struct mdtab_shard {
gpr_mu mu;
- internal_metadata **elems;
+ interned_metadata **elems;
size_t count;
size_t capacity;
/** Estimate of the number of unreferenced mdelems in the hash table.
@@ -136,102 +118,26 @@ typedef struct mdtab_shard {
gpr_atm free_estimate;
} mdtab_shard;
-#define LOG2_STRTAB_SHARD_COUNT 5
-#define LOG2_MDTAB_SHARD_COUNT 4
-#define STRTAB_SHARD_COUNT ((size_t)(1 << LOG2_STRTAB_SHARD_COUNT))
-#define MDTAB_SHARD_COUNT ((size_t)(1 << LOG2_MDTAB_SHARD_COUNT))
-
-/* hash seed: decided at initialization time */
-static uint32_t g_hash_seed;
-static int g_forced_hash_seed = 0;
-
-/* linearly probed hash tables for static element lookup */
-static grpc_mdstr *g_static_strtab[GRPC_STATIC_MDSTR_COUNT * 2];
-static grpc_mdelem *g_static_mdtab[GRPC_STATIC_MDELEM_COUNT * 2];
-static size_t g_static_strtab_maxprobe;
-static size_t g_static_mdtab_maxprobe;
-
-static strtab_shard g_strtab_shard[STRTAB_SHARD_COUNT];
-static mdtab_shard g_mdtab_shard[MDTAB_SHARD_COUNT];
+static mdtab_shard g_shards[SHARD_COUNT];
static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard);
-void grpc_test_only_set_metadata_hash_seed(uint32_t seed) {
- g_hash_seed = seed;
- g_forced_hash_seed = 1;
-}
-
void grpc_mdctx_global_init(void) {
- size_t i, j;
- if (!g_forced_hash_seed) {
- g_hash_seed = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec;
- }
- g_static_strtab_maxprobe = 0;
- g_static_mdtab_maxprobe = 0;
- /* build static tables */
- memset(g_static_mdtab, 0, sizeof(g_static_mdtab));
- memset(g_static_strtab, 0, sizeof(g_static_strtab));
- for (i = 0; i < GRPC_STATIC_MDSTR_COUNT; i++) {
- grpc_mdstr *elem = &grpc_static_mdstr_table[i];
- const char *str = grpc_static_metadata_strings[i];
- uint32_t hash = gpr_murmur_hash3(str, strlen(str), g_hash_seed);
- *(grpc_slice *)&elem->slice = grpc_slice_from_static_string(str);
- *(uint32_t *)&elem->hash = hash;
- for (j = 0;; j++) {
- size_t idx = (hash + j) % GPR_ARRAY_SIZE(g_static_strtab);
- if (g_static_strtab[idx] == NULL) {
- g_static_strtab[idx] = &grpc_static_mdstr_table[i];
- break;
- }
- }
- if (j > g_static_strtab_maxprobe) {
- g_static_strtab_maxprobe = j;
- }
- }
- for (i = 0; i < GRPC_STATIC_MDELEM_COUNT; i++) {
- grpc_mdelem *elem = &grpc_static_mdelem_table[i];
- grpc_mdstr *key =
- &grpc_static_mdstr_table[grpc_static_metadata_elem_indices[2 * i + 0]];
- grpc_mdstr *value =
- &grpc_static_mdstr_table[grpc_static_metadata_elem_indices[2 * i + 1]];
- uint32_t hash = GRPC_MDSTR_KV_HASH(key->hash, value->hash);
- *(grpc_mdstr **)&elem->key = key;
- *(grpc_mdstr **)&elem->value = value;
- for (j = 0;; j++) {
- size_t idx = (hash + j) % GPR_ARRAY_SIZE(g_static_mdtab);
- if (g_static_mdtab[idx] == NULL) {
- g_static_mdtab[idx] = elem;
- break;
- }
- }
- if (j > g_static_mdtab_maxprobe) {
- g_static_mdtab_maxprobe = j;
- }
- }
/* initialize shards */
- for (i = 0; i < STRTAB_SHARD_COUNT; i++) {
- strtab_shard *shard = &g_strtab_shard[i];
- gpr_mu_init(&shard->mu);
- shard->count = 0;
- shard->capacity = INITIAL_STRTAB_CAPACITY;
- shard->strs = gpr_malloc(sizeof(*shard->strs) * shard->capacity);
- memset(shard->strs, 0, sizeof(*shard->strs) * shard->capacity);
- }
- for (i = 0; i < MDTAB_SHARD_COUNT; i++) {
- mdtab_shard *shard = &g_mdtab_shard[i];
+ for (size_t i = 0; i < SHARD_COUNT; i++) {
+ mdtab_shard *shard = &g_shards[i];
gpr_mu_init(&shard->mu);
shard->count = 0;
gpr_atm_no_barrier_store(&shard->free_estimate, 0);
- shard->capacity = INITIAL_MDTAB_CAPACITY;
+ shard->capacity = INITIAL_SHARD_CAPACITY;
shard->elems = gpr_malloc(sizeof(*shard->elems) * shard->capacity);
memset(shard->elems, 0, sizeof(*shard->elems) * shard->capacity);
}
}
void grpc_mdctx_global_shutdown(grpc_exec_ctx *exec_ctx) {
- size_t i;
- for (i = 0; i < MDTAB_SHARD_COUNT; i++) {
- mdtab_shard *shard = &g_mdtab_shard[i];
+ for (size_t i = 0; i < SHARD_COUNT; i++) {
+ mdtab_shard *shard = &g_shards[i];
gpr_mu_destroy(&shard->mu);
gc_mdtab(exec_ctx, shard);
/* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
@@ -244,212 +150,35 @@ void grpc_mdctx_global_shutdown(grpc_exec_ctx *exec_ctx) {
}
gpr_free(shard->elems);
}
- for (i = 0; i < STRTAB_SHARD_COUNT; i++) {
- strtab_shard *shard = &g_strtab_shard[i];
- gpr_mu_destroy(&shard->mu);
- /* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
- if (shard->count != 0) {
- gpr_log(GPR_DEBUG, "WARNING: %" PRIuPTR " metadata strings were leaked",
- shard->count);
- for (size_t j = 0; j < shard->capacity; j++) {
- for (internal_string *s = shard->strs[j]; s; s = s->bucket_next) {
- gpr_log(GPR_DEBUG, "LEAKED: %s",
- grpc_mdstr_as_c_string((grpc_mdstr *)s));
- }
- }
- if (grpc_iomgr_abort_on_leaks()) {
- abort();
- }
- }
- gpr_free(shard->strs);
- }
}
-static int is_mdstr_static(grpc_mdstr *s) {
- return s >= &grpc_static_mdstr_table[0] &&
- s < &grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
-}
-
-static int is_mdelem_static(grpc_mdelem *e) {
- return e >= &grpc_static_mdelem_table[0] &&
- e < &grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
+static int is_mdelem_static(grpc_mdelem e) {
+ return GRPC_MDELEM_DATA(e) >= &grpc_static_mdelem_table[0] &&
+ GRPC_MDELEM_DATA(e) <
+ &grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
}
static void ref_md_locked(mdtab_shard *shard,
- internal_metadata *md DEBUG_ARGS) {
+ interned_metadata *md DEBUG_ARGS) {
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM REF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
gpr_atm_no_barrier_load(&md->refcnt),
- gpr_atm_no_barrier_load(&md->refcnt) + 1,
- grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
- grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
+ gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
#endif
if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 1)) {
gpr_atm_no_barrier_fetch_add(&shard->free_estimate, -1);
}
}
-static void grow_strtab(strtab_shard *shard) {
- size_t capacity = shard->capacity * 2;
- size_t i;
- internal_string **strtab;
- internal_string *s, *next;
-
- GPR_TIMER_BEGIN("grow_strtab", 0);
-
- strtab = gpr_malloc(sizeof(internal_string *) * capacity);
- memset(strtab, 0, sizeof(internal_string *) * capacity);
-
- for (i = 0; i < shard->capacity; i++) {
- for (s = shard->strs[i]; s; s = next) {
- size_t idx = TABLE_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT, capacity);
- next = s->bucket_next;
- s->bucket_next = strtab[idx];
- strtab[idx] = s;
- }
- }
-
- gpr_free(shard->strs);
- shard->strs = strtab;
- shard->capacity = capacity;
-
- GPR_TIMER_END("grow_strtab", 0);
-}
-
-static void internal_destroy_string(grpc_exec_ctx *exec_ctx,
- strtab_shard *shard, internal_string *is) {
- internal_string **prev_next;
- internal_string *cur;
- GPR_TIMER_BEGIN("internal_destroy_string", 0);
- if (is->has_base64_and_huffman_encoded) {
- grpc_slice_unref_internal(exec_ctx, is->base64_and_huffman);
- }
- for (prev_next = &shard->strs[TABLE_IDX(is->hash, LOG2_STRTAB_SHARD_COUNT,
- shard->capacity)],
- cur = *prev_next;
- cur != is; prev_next = &cur->bucket_next, cur = cur->bucket_next)
- ;
- *prev_next = cur->bucket_next;
- shard->count--;
- gpr_free(is);
- GPR_TIMER_END("internal_destroy_string", 0);
-}
-
-static void slice_ref(void *p) {
- internal_string *is =
- (internal_string *)((char *)p - offsetof(internal_string, refcount));
- GRPC_MDSTR_REF((grpc_mdstr *)(is));
-}
-
-static void slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
- internal_string *is =
- (internal_string *)((char *)p - offsetof(internal_string, refcount));
- GRPC_MDSTR_UNREF(exec_ctx, (grpc_mdstr *)(is));
-}
-
-grpc_mdstr *grpc_mdstr_from_string(const char *str) {
- return grpc_mdstr_from_buffer((const uint8_t *)str, strlen(str));
-}
-
-grpc_mdstr *grpc_mdstr_from_slice(grpc_exec_ctx *exec_ctx, grpc_slice slice) {
- grpc_mdstr *result = grpc_mdstr_from_buffer(GRPC_SLICE_START_PTR(slice),
- GRPC_SLICE_LENGTH(slice));
- grpc_slice_unref_internal(exec_ctx, slice);
- return result;
-}
-
-grpc_mdstr *grpc_mdstr_from_buffer(const uint8_t *buf, size_t length) {
- uint32_t hash = gpr_murmur_hash3(buf, length, g_hash_seed);
- internal_string *s;
- strtab_shard *shard =
- &g_strtab_shard[SHARD_IDX(hash, LOG2_STRTAB_SHARD_COUNT)];
- size_t i;
- size_t idx;
-
- GPR_TIMER_BEGIN("grpc_mdstr_from_buffer", 0);
-
- /* search for a static string */
- for (i = 0; i <= g_static_strtab_maxprobe; i++) {
- grpc_mdstr *ss;
- idx = (hash + i) % GPR_ARRAY_SIZE(g_static_strtab);
- ss = g_static_strtab[idx];
- if (ss == NULL) break;
- if (ss->hash == hash && GRPC_SLICE_LENGTH(ss->slice) == length &&
- (length == 0 ||
- 0 == memcmp(buf, GRPC_SLICE_START_PTR(ss->slice), length))) {
- GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
- return ss;
- }
- }
-
- gpr_mu_lock(&shard->mu);
-
- /* search for an existing string */
- idx = TABLE_IDX(hash, LOG2_STRTAB_SHARD_COUNT, shard->capacity);
- for (s = shard->strs[idx]; s; s = s->bucket_next) {
- if (s->hash == hash && GRPC_SLICE_LENGTH(s->slice) == length &&
- 0 == memcmp(buf, GRPC_SLICE_START_PTR(s->slice), length)) {
- if (gpr_atm_full_fetch_add(&s->refcnt, 1) == 0) {
- /* If we get here, we've added a ref to something that was about to
- * die - drop it immediately.
- * The *only* possible path here (given the shard mutex) should be to
- * drop from one ref back to zero - assert that with a CAS */
- GPR_ASSERT(gpr_atm_rel_cas(&s->refcnt, 1, 0));
- /* and treat this as if we were never here... sshhh */
- } else {
- gpr_mu_unlock(&shard->mu);
- GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
- return (grpc_mdstr *)s;
- }
- }
- }
-
- /* not found: create a new string */
- if (length + 1 < GRPC_SLICE_INLINED_SIZE) {
- /* string data goes directly into the slice */
- s = gpr_malloc(sizeof(internal_string));
- gpr_atm_rel_store(&s->refcnt, 1);
- s->slice.refcount = NULL;
- memcpy(s->slice.data.inlined.bytes, buf, length);
- s->slice.data.inlined.bytes[length] = 0;
- s->slice.data.inlined.length = (uint8_t)length;
- } else {
- /* string data goes after the internal_string header, and we +1 for null
- terminator */
- s = gpr_malloc(sizeof(internal_string) + length + 1);
- gpr_atm_rel_store(&s->refcnt, 1);
- s->refcount.ref = slice_ref;
- s->refcount.unref = slice_unref;
- s->slice.refcount = &s->refcount;
- s->slice.data.refcounted.bytes = (uint8_t *)(s + 1);
- s->slice.data.refcounted.length = length;
- memcpy(s->slice.data.refcounted.bytes, buf, length);
- /* add a null terminator for cheap c string conversion when desired */
- s->slice.data.refcounted.bytes[length] = 0;
- }
- s->has_base64_and_huffman_encoded = 0;
- s->hash = hash;
- s->size_in_decoder_table = SIZE_IN_DECODER_TABLE_NOT_SET;
- s->bucket_next = shard->strs[idx];
- shard->strs[idx] = s;
-
- shard->count++;
-
- if (shard->count > shard->capacity * 2) {
- grow_strtab(shard);
- }
-
- gpr_mu_unlock(&shard->mu);
- GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
-
- return (grpc_mdstr *)s;
-}
-
static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) {
size_t i;
- internal_metadata **prev_next;
- internal_metadata *md, *next;
+ interned_metadata **prev_next;
+ interned_metadata *md, *next;
gpr_atm num_freed = 0;
GPR_TIMER_BEGIN("gc_mdtab", 0);
@@ -459,8 +188,8 @@ static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) {
void *user_data = (void *)gpr_atm_no_barrier_load(&md->user_data);
next = md->bucket_next;
if (gpr_atm_acq_load(&md->refcnt) == 0) {
- GRPC_MDSTR_UNREF(exec_ctx, (grpc_mdstr *)md->key);
- GRPC_MDSTR_UNREF(exec_ctx, (grpc_mdstr *)md->value);
+ grpc_slice_unref_internal(exec_ctx, md->key);
+ grpc_slice_unref_internal(exec_ctx, md->value);
if (md->user_data) {
((destroy_user_data_func)gpr_atm_no_barrier_load(
&md->destroy_user_data))(user_data);
@@ -481,21 +210,22 @@ static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) {
static void grow_mdtab(mdtab_shard *shard) {
size_t capacity = shard->capacity * 2;
size_t i;
- internal_metadata **mdtab;
- internal_metadata *md, *next;
+ interned_metadata **mdtab;
+ interned_metadata *md, *next;
uint32_t hash;
GPR_TIMER_BEGIN("grow_mdtab", 0);
- mdtab = gpr_malloc(sizeof(internal_metadata *) * capacity);
- memset(mdtab, 0, sizeof(internal_metadata *) * capacity);
+ mdtab = gpr_malloc(sizeof(interned_metadata *) * capacity);
+ memset(mdtab, 0, sizeof(interned_metadata *) * capacity);
for (i = 0; i < shard->capacity; i++) {
for (md = shard->elems[i]; md; md = next) {
size_t idx;
- hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
+ hash = GRPC_MDSTR_KV_HASH(grpc_slice_hash(md->key),
+ grpc_slice_hash(md->value));
next = md->bucket_next;
- idx = TABLE_IDX(hash, LOG2_MDTAB_SHARD_COUNT, capacity);
+ idx = TABLE_IDX(hash, capacity);
md->bucket_next = mdtab[idx];
mdtab[idx] = md;
}
@@ -517,62 +247,77 @@ static void rehash_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) {
}
}
-grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_exec_ctx *exec_ctx,
- grpc_mdstr *mkey,
- grpc_mdstr *mvalue) {
- internal_string *key = (internal_string *)mkey;
- internal_string *value = (internal_string *)mvalue;
- uint32_t hash = GRPC_MDSTR_KV_HASH(mkey->hash, mvalue->hash);
- internal_metadata *md;
- mdtab_shard *shard = &g_mdtab_shard[SHARD_IDX(hash, LOG2_MDTAB_SHARD_COUNT)];
- size_t i;
- size_t idx;
+grpc_mdelem grpc_mdelem_create(
+ grpc_exec_ctx *exec_ctx, grpc_slice key, grpc_slice value,
+ grpc_mdelem_data *compatible_external_backing_store) {
+ if (!grpc_slice_is_interned(key) || !grpc_slice_is_interned(value)) {
+ if (compatible_external_backing_store != NULL) {
+ return GRPC_MAKE_MDELEM(compatible_external_backing_store,
+ GRPC_MDELEM_STORAGE_EXTERNAL);
+ }
- GPR_TIMER_BEGIN("grpc_mdelem_from_metadata_strings", 0);
+ allocated_metadata *allocated = gpr_malloc(sizeof(*allocated));
+ allocated->key = grpc_slice_ref_internal(key);
+ allocated->value = grpc_slice_ref_internal(value);
+ gpr_atm_rel_store(&allocated->refcnt, 1);
+#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+ char *key_str = grpc_slice_to_c_string(allocated->key);
+ char *value_str = grpc_slice_to_c_string(allocated->value);
+ gpr_log(GPR_DEBUG, "ELM ALLOC:%p:%zu: '%s' = '%s'", (void *)allocated,
+ gpr_atm_no_barrier_load(&allocated->refcnt), key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+#endif
+ return GRPC_MAKE_MDELEM(allocated, GRPC_MDELEM_STORAGE_ALLOCATED);
+ }
- if (is_mdstr_static(mkey) && is_mdstr_static(mvalue)) {
- for (i = 0; i <= g_static_mdtab_maxprobe; i++) {
- grpc_mdelem *smd;
- idx = (hash + i) % GPR_ARRAY_SIZE(g_static_mdtab);
- smd = g_static_mdtab[idx];
- if (smd == NULL) break;
- if (smd->key == mkey && smd->value == mvalue) {
- GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
- return smd;
- }
+ if (GRPC_IS_STATIC_METADATA_STRING(key) &&
+ GRPC_IS_STATIC_METADATA_STRING(value)) {
+ grpc_mdelem static_elem = grpc_static_mdelem_for_static_strings(
+ GRPC_STATIC_METADATA_INDEX(key), GRPC_STATIC_METADATA_INDEX(value));
+ if (!GRPC_MDISNULL(static_elem)) {
+ return static_elem;
}
}
+ uint32_t hash =
+ GRPC_MDSTR_KV_HASH(grpc_slice_hash(key), grpc_slice_hash(value));
+ interned_metadata *md;
+ mdtab_shard *shard = &g_shards[SHARD_IDX(hash)];
+ size_t idx;
+
+ GPR_TIMER_BEGIN("grpc_mdelem_from_metadata_strings", 0);
+
gpr_mu_lock(&shard->mu);
- idx = TABLE_IDX(hash, LOG2_MDTAB_SHARD_COUNT, shard->capacity);
+ idx = TABLE_IDX(hash, shard->capacity);
/* search for an existing pair */
for (md = shard->elems[idx]; md; md = md->bucket_next) {
- if (md->key == key && md->value == value) {
+ if (grpc_slice_eq(key, md->key) && grpc_slice_eq(value, md->value)) {
REF_MD_LOCKED(shard, md);
- GRPC_MDSTR_UNREF(exec_ctx, (grpc_mdstr *)key);
- GRPC_MDSTR_UNREF(exec_ctx, (grpc_mdstr *)value);
gpr_mu_unlock(&shard->mu);
GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
- return (grpc_mdelem *)md;
+ return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED);
}
}
/* not found: create a new pair */
- md = gpr_malloc(sizeof(internal_metadata));
+ md = gpr_malloc(sizeof(interned_metadata));
gpr_atm_rel_store(&md->refcnt, 1);
- md->key = key;
- md->value = value;
+ md->key = grpc_slice_ref_internal(key);
+ md->value = grpc_slice_ref_internal(value);
md->user_data = 0;
md->destroy_user_data = 0;
md->bucket_next = shard->elems[idx];
shard->elems[idx] = md;
gpr_mu_init(&md->mu_user_data);
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
gpr_log(GPR_DEBUG, "ELM NEW:%p:%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt),
- grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
- grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
+ gpr_atm_no_barrier_load(&md->refcnt), key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
#endif
shard->count++;
@@ -584,29 +329,26 @@ grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
- return (grpc_mdelem *)md;
-}
-
-grpc_mdelem *grpc_mdelem_from_strings(grpc_exec_ctx *exec_ctx, const char *key,
- const char *value) {
- return grpc_mdelem_from_metadata_strings(
- exec_ctx, grpc_mdstr_from_string(key), grpc_mdstr_from_string(value));
+ return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED);
}
-grpc_mdelem *grpc_mdelem_from_slices(grpc_exec_ctx *exec_ctx, grpc_slice key,
- grpc_slice value) {
- return grpc_mdelem_from_metadata_strings(
- exec_ctx, grpc_mdstr_from_slice(exec_ctx, key),
- grpc_mdstr_from_slice(exec_ctx, value));
+grpc_mdelem grpc_mdelem_from_slices(grpc_exec_ctx *exec_ctx, grpc_slice key,
+ grpc_slice value) {
+ grpc_mdelem out = grpc_mdelem_create(exec_ctx, key, value, NULL);
+ grpc_slice_unref_internal(exec_ctx, key);
+ grpc_slice_unref_internal(exec_ctx, value);
+ return out;
}
-grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_exec_ctx *exec_ctx,
- const char *key,
- const uint8_t *value,
- size_t value_length) {
- return grpc_mdelem_from_metadata_strings(
- exec_ctx, grpc_mdstr_from_string(key),
- grpc_mdstr_from_buffer(value, value_length));
+grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_metadata *metadata) {
+ bool changed = false;
+ grpc_slice key_slice =
+ grpc_slice_maybe_static_intern(metadata->key, &changed);
+ grpc_slice value_slice =
+ grpc_slice_maybe_static_intern(metadata->value, &changed);
+ return grpc_mdelem_create(exec_ctx, key_slice, value_slice,
+ changed ? NULL : (grpc_mdelem_data *)metadata);
}
static size_t get_base64_encoded_size(size_t raw_length) {
@@ -614,160 +356,176 @@ static size_t get_base64_encoded_size(size_t raw_length) {
return raw_length / 3 * 4 + tail_xtra[raw_length % 3];
}
-size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem *elem) {
- size_t overhead_and_key = 32 + GRPC_SLICE_LENGTH(elem->key->slice);
- size_t value_len = GRPC_SLICE_LENGTH(elem->value->slice);
- if (is_mdstr_static(elem->value)) {
- if (grpc_is_binary_header(
- (const char *)GRPC_SLICE_START_PTR(elem->key->slice),
- GRPC_SLICE_LENGTH(elem->key->slice))) {
- return overhead_and_key + get_base64_encoded_size(value_len);
- } else {
- return overhead_and_key + value_len;
- }
+size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem elem) {
+ size_t overhead_and_key = 32 + GRPC_SLICE_LENGTH(GRPC_MDKEY(elem));
+ size_t value_len = GRPC_SLICE_LENGTH(GRPC_MDVALUE(elem));
+ if (grpc_is_binary_header(GRPC_MDKEY(elem))) {
+ return overhead_and_key + get_base64_encoded_size(value_len);
} else {
- internal_string *is = (internal_string *)elem->value;
- gpr_atm current_size = gpr_atm_acq_load(&is->size_in_decoder_table);
- if (current_size == SIZE_IN_DECODER_TABLE_NOT_SET) {
- if (grpc_is_binary_header(
- (const char *)GRPC_SLICE_START_PTR(elem->key->slice),
- GRPC_SLICE_LENGTH(elem->key->slice))) {
- current_size = (gpr_atm)get_base64_encoded_size(value_len);
- } else {
- current_size = (gpr_atm)value_len;
- }
- gpr_atm_rel_store(&is->size_in_decoder_table, current_size);
- }
- return overhead_and_key + (size_t)current_size;
+ return overhead_and_key + value_len;
}
}
-grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *gmd DEBUG_ARGS) {
- internal_metadata *md = (internal_metadata *)gmd;
- if (is_mdelem_static(gmd)) return gmd;
+grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd DEBUG_ARGS) {
+ switch (GRPC_MDELEM_STORAGE(gmd)) {
+ case GRPC_MDELEM_STORAGE_EXTERNAL:
+ case GRPC_MDELEM_STORAGE_STATIC:
+ break;
+ case GRPC_MDELEM_STORAGE_INTERNED: {
+ interned_metadata *md = (interned_metadata *)GRPC_MDELEM_DATA(gmd);
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "ELM REF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt),
- gpr_atm_no_barrier_load(&md->refcnt) + 1,
- grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
- grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM REF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
+ gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
#endif
- /* we can assume the ref count is >= 1 as the application is calling
- this function - meaning that no adjustment to mdtab_free is necessary,
- simplifying the logic here to be just an atomic increment */
- /* use C assert to have this removed in opt builds */
- GPR_ASSERT(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
- gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
- return gmd;
-}
-
-void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem *gmd DEBUG_ARGS) {
- internal_metadata *md = (internal_metadata *)gmd;
- if (!md) return;
- if (is_mdelem_static(gmd)) return;
+ /* we can assume the ref count is >= 1 as the application is calling
+ this function - meaning that no adjustment to mdtab_free is necessary,
+ simplifying the logic here to be just an atomic increment */
+ /* use C assert to have this removed in opt builds */
+ GPR_ASSERT(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
+ gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
+ break;
+ }
+ case GRPC_MDELEM_STORAGE_ALLOCATED: {
+ allocated_metadata *md = (allocated_metadata *)GRPC_MDELEM_DATA(gmd);
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "ELM UNREF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt),
- gpr_atm_no_barrier_load(&md->refcnt) - 1,
- grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
- grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM REF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
+ gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
#endif
- uint32_t hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
- const gpr_atm prev_refcount = gpr_atm_full_fetch_add(&md->refcnt, -1);
- GPR_ASSERT(prev_refcount >= 1);
- if (1 == prev_refcount) {
- /* once the refcount hits zero, some other thread can come along and
- free md at any time: it's unsafe from this point on to access it */
- mdtab_shard *shard =
- &g_mdtab_shard[SHARD_IDX(hash, LOG2_MDTAB_SHARD_COUNT)];
- gpr_atm_no_barrier_fetch_add(&shard->free_estimate, 1);
+ /* we can assume the ref count is >= 1 as the application is calling
+ this function - meaning that no adjustment to mdtab_free is necessary,
+ simplifying the logic here to be just an atomic increment */
+ /* use C assert to have this removed in opt builds */
+ GPR_ASSERT(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
+ gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
+ break;
+ }
}
+ return gmd;
}
-const char *grpc_mdstr_as_c_string(const grpc_mdstr *s) {
- return (const char *)GRPC_SLICE_START_PTR(s->slice);
-}
-
-size_t grpc_mdstr_length(const grpc_mdstr *s) { return GRPC_MDSTR_LENGTH(s); }
-
-grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *gs DEBUG_ARGS) {
- internal_string *s = (internal_string *)gs;
- if (is_mdstr_static(gs)) return gs;
+void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) {
+ switch (GRPC_MDELEM_STORAGE(gmd)) {
+ case GRPC_MDELEM_STORAGE_EXTERNAL:
+ case GRPC_MDELEM_STORAGE_STATIC:
+ break;
+ case GRPC_MDELEM_STORAGE_INTERNED: {
+ interned_metadata *md = (interned_metadata *)GRPC_MDELEM_DATA(gmd);
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "STR REF:%p:%zu->%zu: '%s'",
- (void *)s, gpr_atm_no_barrier_load(&s->refcnt),
- gpr_atm_no_barrier_load(&s->refcnt) + 1, grpc_mdstr_as_c_string(gs));
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM UNREF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
+ gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
#endif
- GPR_ASSERT(gpr_atm_full_fetch_add(&s->refcnt, 1) > 0);
- return gs;
-}
-
-void grpc_mdstr_unref(grpc_exec_ctx *exec_ctx, grpc_mdstr *gs DEBUG_ARGS) {
- internal_string *s = (internal_string *)gs;
- if (is_mdstr_static(gs)) return;
+ uint32_t hash = GRPC_MDSTR_KV_HASH(grpc_slice_hash(md->key),
+ grpc_slice_hash(md->value));
+ const gpr_atm prev_refcount = gpr_atm_full_fetch_add(&md->refcnt, -1);
+ GPR_ASSERT(prev_refcount >= 1);
+ if (1 == prev_refcount) {
+ /* once the refcount hits zero, some other thread can come along and
+ free md at any time: it's unsafe from this point on to access it */
+ mdtab_shard *shard = &g_shards[SHARD_IDX(hash)];
+ gpr_atm_no_barrier_fetch_add(&shard->free_estimate, 1);
+ }
+ break;
+ }
+ case GRPC_MDELEM_STORAGE_ALLOCATED: {
+ allocated_metadata *md = (allocated_metadata *)GRPC_MDELEM_DATA(gmd);
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "STR UNREF:%p:%zu->%zu: '%s'",
- (void *)s, gpr_atm_no_barrier_load(&s->refcnt),
- gpr_atm_no_barrier_load(&s->refcnt) - 1, grpc_mdstr_as_c_string(gs));
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM UNREF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
+ gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
#endif
- if (1 == gpr_atm_full_fetch_add(&s->refcnt, -1)) {
- strtab_shard *shard =
- &g_strtab_shard[SHARD_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT)];
- gpr_mu_lock(&shard->mu);
- GPR_ASSERT(0 == gpr_atm_no_barrier_load(&s->refcnt));
- internal_destroy_string(exec_ctx, shard, s);
- gpr_mu_unlock(&shard->mu);
+ const gpr_atm prev_refcount = gpr_atm_full_fetch_add(&md->refcnt, -1);
+ GPR_ASSERT(prev_refcount >= 1);
+ if (1 == prev_refcount) {
+ grpc_slice_unref_internal(exec_ctx, md->key);
+ grpc_slice_unref_internal(exec_ctx, md->value);
+ gpr_free(md);
+ }
+ break;
+ }
}
}
-void *grpc_mdelem_get_user_data(grpc_mdelem *md, void (*destroy_func)(void *)) {
- internal_metadata *im = (internal_metadata *)md;
- void *result;
- if (is_mdelem_static(md)) {
- return (void *)grpc_static_mdelem_user_data[md - grpc_static_mdelem_table];
- }
- if (gpr_atm_acq_load(&im->destroy_user_data) == (gpr_atm)destroy_func) {
- return (void *)gpr_atm_no_barrier_load(&im->user_data);
- } else {
- return NULL;
+void *grpc_mdelem_get_user_data(grpc_mdelem md, void (*destroy_func)(void *)) {
+ switch (GRPC_MDELEM_STORAGE(md)) {
+ case GRPC_MDELEM_STORAGE_EXTERNAL:
+ case GRPC_MDELEM_STORAGE_ALLOCATED:
+ return NULL;
+ case GRPC_MDELEM_STORAGE_STATIC:
+ return (void *)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) -
+ grpc_static_mdelem_table];
+ case GRPC_MDELEM_STORAGE_INTERNED: {
+ interned_metadata *im = (interned_metadata *)GRPC_MDELEM_DATA(md);
+ if (gpr_atm_acq_load(&im->destroy_user_data) == (gpr_atm)destroy_func) {
+ return (void *)gpr_atm_no_barrier_load(&im->user_data);
+ } else {
+ return NULL;
+ }
+ }
}
- return result;
+ GPR_UNREACHABLE_CODE(return NULL);
}
-void *grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
+void *grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void *),
void *user_data) {
- internal_metadata *im = (internal_metadata *)md;
- GPR_ASSERT(!is_mdelem_static(md));
- GPR_ASSERT((user_data == NULL) == (destroy_func == NULL));
- gpr_mu_lock(&im->mu_user_data);
- if (gpr_atm_no_barrier_load(&im->destroy_user_data)) {
- /* user data can only be set once */
- gpr_mu_unlock(&im->mu_user_data);
- if (destroy_func != NULL) {
+ switch (GRPC_MDELEM_STORAGE(md)) {
+ case GRPC_MDELEM_STORAGE_EXTERNAL:
+ case GRPC_MDELEM_STORAGE_ALLOCATED:
destroy_func(user_data);
+ return NULL;
+ case GRPC_MDELEM_STORAGE_STATIC:
+ destroy_func(user_data);
+ return (void *)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) -
+ grpc_static_mdelem_table];
+ case GRPC_MDELEM_STORAGE_INTERNED: {
+ interned_metadata *im = (interned_metadata *)GRPC_MDELEM_DATA(md);
+ GPR_ASSERT(!is_mdelem_static(md));
+ GPR_ASSERT((user_data == NULL) == (destroy_func == NULL));
+ gpr_mu_lock(&im->mu_user_data);
+ if (gpr_atm_no_barrier_load(&im->destroy_user_data)) {
+ /* user data can only be set once */
+ gpr_mu_unlock(&im->mu_user_data);
+ if (destroy_func != NULL) {
+ destroy_func(user_data);
+ }
+ return (void *)gpr_atm_no_barrier_load(&im->user_data);
+ }
+ gpr_atm_no_barrier_store(&im->user_data, (gpr_atm)user_data);
+ gpr_atm_rel_store(&im->destroy_user_data, (gpr_atm)destroy_func);
+ gpr_mu_unlock(&im->mu_user_data);
+ return user_data;
}
- return (void *)gpr_atm_no_barrier_load(&im->user_data);
}
- gpr_atm_no_barrier_store(&im->user_data, (gpr_atm)user_data);
- gpr_atm_rel_store(&im->destroy_user_data, (gpr_atm)destroy_func);
- gpr_mu_unlock(&im->mu_user_data);
- return user_data;
+ GPR_UNREACHABLE_CODE(return NULL);
}
-grpc_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *gs) {
- internal_string *s = (internal_string *)gs;
- grpc_slice slice;
- strtab_shard *shard =
- &g_strtab_shard[SHARD_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT)];
- gpr_mu_lock(&shard->mu);
- if (!s->has_base64_and_huffman_encoded) {
- s->base64_and_huffman =
- grpc_chttp2_base64_encode_and_huffman_compress(s->slice);
- s->has_base64_and_huffman_encoded = 1;
- }
- slice = s->base64_and_huffman;
- gpr_mu_unlock(&shard->mu);
- return slice;
+bool grpc_mdelem_eq(grpc_mdelem a, grpc_mdelem b) {
+ if (a.payload == b.payload) return true;
+ if (GRPC_MDELEM_IS_INTERNED(a) && GRPC_MDELEM_IS_INTERNED(b)) return false;
+ if (GRPC_MDISNULL(a) || GRPC_MDISNULL(b)) return false;
+ return grpc_slice_eq(GRPC_MDKEY(a), GRPC_MDKEY(b)) &&
+ grpc_slice_eq(GRPC_MDVALUE(a), GRPC_MDVALUE(b));
}
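
A minimal usage sketch of the value-type grpc_mdelem API above (illustrative only, not part of the patch; exec_ctx setup and include paths are assumed from the surrounding tree, and the header values are made up):

#include <grpc/slice.h>
#include <grpc/support/log.h>
#include "src/core/lib/transport/metadata.h"

static void example_mdelem_usage(grpc_exec_ctx *exec_ctx) {
  /* grpc_mdelem_from_slices takes ownership of (unrefs) both slices */
  grpc_mdelem a = grpc_mdelem_from_slices(
      exec_ctx, grpc_slice_from_static_string("user-agent"),
      grpc_slice_from_static_string("example/1.0"));
  grpc_mdelem b = GRPC_MDELEM_REF(a); /* bumps the refcount; a no-op for
                                         static/external storage classes */
  GPR_ASSERT(grpc_mdelem_eq(a, b));   /* identical payloads: fast-path true */
  GRPC_MDELEM_UNREF(exec_ctx, b);
  GRPC_MDELEM_UNREF(exec_ctx, a);
}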
diff --git a/src/core/lib/transport/metadata.h b/src/core/lib/transport/metadata.h
index 991eee96f1..f4ba86c854 100644
--- a/src/core/lib/transport/metadata.h
+++ b/src/core/lib/transport/metadata.h
@@ -34,6 +34,7 @@
#ifndef GRPC_CORE_LIB_TRANSPORT_METADATA_H
#define GRPC_CORE_LIB_TRANSPORT_METADATA_H
+#include <grpc/grpc.h>
#include <grpc/slice.h>
#include <grpc/support/useful.h>
@@ -74,110 +75,110 @@ extern "C" {
declared here - in which case those functions are effectively no-ops. */
/* Forward declarations */
-typedef struct grpc_mdstr grpc_mdstr;
typedef struct grpc_mdelem grpc_mdelem;
-/* if changing this, make identical changes in internal_string in metadata.c */
-struct grpc_mdstr {
- const grpc_slice slice;
- const uint32_t hash;
+/* if changing this, make identical changes in:
+ - interned_metadata, allocated_metadata in metadata.c
+ - grpc_metadata in grpc_types.h */
+typedef struct grpc_mdelem_data {
+ const grpc_slice key;
+ const grpc_slice value;
/* there is a private part to this in metadata.c */
-};
+} grpc_mdelem_data;
+
+/* GRPC_MDELEM_STORAGE_* enum values that can be treated as interned always have
+ this bit set in their integer value */
+#define GRPC_MDELEM_STORAGE_INTERNED_BIT 1
+
+typedef enum {
+ /* memory pointed to by grpc_mdelem::payload is owned by an external system */
+ GRPC_MDELEM_STORAGE_EXTERNAL = 0,
+ /* memory pointed to by grpc_mdelem::payload is interned by the metadata
+ system */
+ GRPC_MDELEM_STORAGE_INTERNED = GRPC_MDELEM_STORAGE_INTERNED_BIT,
+ /* memory pointed to by grpc_mdelem::payload is allocated by the metadata
+ system */
+ GRPC_MDELEM_STORAGE_ALLOCATED = 2,
+ /* memory is in the static metadata table */
+ GRPC_MDELEM_STORAGE_STATIC = 2 | GRPC_MDELEM_STORAGE_INTERNED_BIT,
+} grpc_mdelem_data_storage;
-/* if changing this, make identical changes in internal_metadata in
- metadata.c */
struct grpc_mdelem {
- grpc_mdstr *const key;
- grpc_mdstr *const value;
- /* there is a private part to this in metadata.c */
+ /* a grpc_mdelem_data* generally, with the two lower bits signalling memory
+ ownership as per grpc_mdelem_data_storage */
+ uintptr_t payload;
};
-void grpc_test_only_set_metadata_hash_seed(uint32_t seed);
-
-/* Constructors for grpc_mdstr instances; take a variety of data types that
- clients may have handy */
-grpc_mdstr *grpc_mdstr_from_string(const char *str);
-/* Unrefs the slice. */
-grpc_mdstr *grpc_mdstr_from_slice(grpc_exec_ctx *exec_ctx, grpc_slice slice);
-grpc_mdstr *grpc_mdstr_from_buffer(const uint8_t *str, size_t length);
-
-/* Returns a borrowed slice from the mdstr with its contents base64 encoded
- and huffman compressed */
-grpc_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *str);
-
-/* Constructors for grpc_mdelem instances; take a variety of data types that
- clients may have handy */
-grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_exec_ctx *exec_ctx,
- grpc_mdstr *key,
- grpc_mdstr *value);
-grpc_mdelem *grpc_mdelem_from_strings(grpc_exec_ctx *exec_ctx, const char *key,
- const char *value);
+#define GRPC_MDELEM_DATA(md) \
+ ((grpc_mdelem_data *)((md).payload & ~(uintptr_t)3))
+#define GRPC_MDELEM_STORAGE(md) \
+ ((grpc_mdelem_data_storage)((md).payload & (uintptr_t)3))
+#define GRPC_MAKE_MDELEM(data, storage) \
+ ((grpc_mdelem){((uintptr_t)(data)) | ((uintptr_t)storage)})
+#define GRPC_MDELEM_IS_INTERNED(md) \
+ ((grpc_mdelem_data_storage)((md).payload & \
+ (uintptr_t)GRPC_MDELEM_STORAGE_INTERNED_BIT))
+
/* Unrefs the slices. */
-grpc_mdelem *grpc_mdelem_from_slices(grpc_exec_ctx *exec_ctx, grpc_slice key,
- grpc_slice value);
-grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_exec_ctx *exec_ctx,
- const char *key,
- const uint8_t *value,
- size_t value_length);
+grpc_mdelem grpc_mdelem_from_slices(grpc_exec_ctx *exec_ctx, grpc_slice key,
+ grpc_slice value);
+
+/* Cheaply convert a grpc_metadata to a grpc_mdelem; may use the grpc_metadata
+ object as backing storage (so lifetimes should align) */
+grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_metadata *metadata);
-size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem *elem);
+/* Does not unref the slices; if a new non-interned mdelem is needed, allocates
+ one if compatible_external_backing_store is NULL, or uses
+ compatible_external_backing_store if it is non-NULL (in which case it's the
+ user's responsibility to ensure that it outlives usage) */
+grpc_mdelem grpc_mdelem_create(
+ grpc_exec_ctx *exec_ctx, grpc_slice key, grpc_slice value,
+ grpc_mdelem_data *compatible_external_backing_store);
+
+bool grpc_mdelem_eq(grpc_mdelem a, grpc_mdelem b);
+
+size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem elem);
/* Mutator and accessor for grpc_mdelem user data. The destructor function
is used as a type tag and is checked during user_data fetch. */
-void *grpc_mdelem_get_user_data(grpc_mdelem *md,
+void *grpc_mdelem_get_user_data(grpc_mdelem md,
void (*if_destroy_func)(void *));
-void *grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
+void *grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void *),
void *user_data);
/* Reference counting */
//#define GRPC_METADATA_REFCOUNT_DEBUG
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
-#define GRPC_MDSTR_REF(s) grpc_mdstr_ref((s), __FILE__, __LINE__)
-#define GRPC_MDSTR_UNREF(exec_ctx, s) \
- grpc_mdstr_unref((exec_ctx), (s), __FILE__, __LINE__)
#define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s), __FILE__, __LINE__)
#define GRPC_MDELEM_UNREF(exec_ctx, s) \
grpc_mdelem_unref((exec_ctx), (s), __FILE__, __LINE__)
-grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *s, const char *file, int line);
-void grpc_mdstr_unref(grpc_exec_ctx *exec_ctx, grpc_mdstr *s, const char *file,
- int line);
-grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *md, const char *file, int line);
-void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem *md,
+grpc_mdelem grpc_mdelem_ref(grpc_mdelem md, const char *file, int line);
+void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem md,
const char *file, int line);
#else
-#define GRPC_MDSTR_REF(s) grpc_mdstr_ref((s))
-#define GRPC_MDSTR_UNREF(exec_ctx, s) grpc_mdstr_unref((exec_ctx), (s))
#define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s))
#define GRPC_MDELEM_UNREF(exec_ctx, s) grpc_mdelem_unref((exec_ctx), (s))
-grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *s);
-void grpc_mdstr_unref(grpc_exec_ctx *exec_ctx, grpc_mdstr *s);
-grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *md);
-void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem *md);
+grpc_mdelem grpc_mdelem_ref(grpc_mdelem md);
+void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem md);
#endif
-/* Recover a char* from a grpc_mdstr. The returned string is null terminated.
- Does not promise that the returned string has no embedded nulls however. */
-const char *grpc_mdstr_as_c_string(const grpc_mdstr *s);
+#define GRPC_MDKEY(md) (GRPC_MDELEM_DATA(md)->key)
+#define GRPC_MDVALUE(md) (GRPC_MDELEM_DATA(md)->value)
-#define GRPC_MDSTR_LENGTH(s) (GRPC_SLICE_LENGTH(s->slice))
+#define GRPC_MDNULL GRPC_MAKE_MDELEM(NULL, GRPC_MDELEM_STORAGE_EXTERNAL)
+#define GRPC_MDISNULL(md) (GRPC_MDELEM_DATA(md) == NULL)
/* We add 32 bytes of padding as per RFC-7540 section 6.5.2. */
-#define GRPC_MDELEM_LENGTH(e) \
- (GRPC_MDSTR_LENGTH((e)->key) + GRPC_MDSTR_LENGTH((e)->value) + 32)
-
-int grpc_mdstr_is_legal_header(grpc_mdstr *s);
-int grpc_mdstr_is_legal_nonbin_header(grpc_mdstr *s);
-int grpc_mdstr_is_bin_suffixed(grpc_mdstr *s);
+#define GRPC_MDELEM_LENGTH(e) \
+ (GRPC_SLICE_LENGTH(GRPC_MDKEY((e))) + GRPC_SLICE_LENGTH(GRPC_MDVALUE((e))) + \
+ 32)
#define GRPC_MDSTR_KV_HASH(k_hash, v_hash) (GPR_ROTL((k_hash), 2) ^ (v_hash))
void grpc_mdctx_global_init(void);
void grpc_mdctx_global_shutdown(grpc_exec_ctx *exec_ctx);
-/* Implementation provided by chttp2_transport */
-extern grpc_slice (*grpc_chttp2_base64_encode_and_huffman_compress)(
- grpc_slice input);
-
#ifdef __cplusplus
}
#endif
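
An illustrative sketch (not part of the patch) of the payload encoding declared above: the two low bits of grpc_mdelem.payload carry the storage class and the remaining bits carry the grpc_mdelem_data pointer, which is why the accessor macros mask with ~3. This assumes grpc_mdelem_data is at least 4-byte aligned, which holds because it begins with a grpc_slice.

static void example_payload_tagging(void) {
  grpc_mdelem_data d = {grpc_slice_from_static_string("k"),
                        grpc_slice_from_static_string("v")};
  grpc_mdelem md = GRPC_MAKE_MDELEM(&d, GRPC_MDELEM_STORAGE_EXTERNAL);
  GPR_ASSERT(GRPC_MDELEM_DATA(md) == &d);       /* pointer recovered */
  GPR_ASSERT(GRPC_MDELEM_STORAGE(md) == GRPC_MDELEM_STORAGE_EXTERNAL);
  GPR_ASSERT(!GRPC_MDELEM_IS_INTERNED(md));     /* interned bit clear */
  GPR_ASSERT(GRPC_SLICE_LENGTH(GRPC_MDKEY(md)) == 1);
}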
diff --git a/src/core/lib/transport/metadata_batch.c b/src/core/lib/transport/metadata_batch.c
index b62ecc3aa6..95b71d33d7 100644
--- a/src/core/lib/transport/metadata_batch.c
+++ b/src/core/lib/transport/metadata_batch.c
@@ -40,6 +40,8 @@
#include <grpc/support/log.h>
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
static void assert_valid_list(grpc_mdelem_list *list) {
#ifndef NDEBUG
@@ -51,16 +53,34 @@ static void assert_valid_list(grpc_mdelem_list *list) {
GPR_ASSERT(list->tail->next == NULL);
GPR_ASSERT((list->head == list->tail) == (list->head->next == NULL));
+ size_t verified_count = 0;
for (l = list->head; l; l = l->next) {
- GPR_ASSERT(l->md);
+ GPR_ASSERT(!GRPC_MDISNULL(l->md));
GPR_ASSERT((l->prev == NULL) == (l == list->head));
GPR_ASSERT((l->next == NULL) == (l == list->tail));
if (l->next) GPR_ASSERT(l->next->prev == l);
if (l->prev) GPR_ASSERT(l->prev->next == l);
+ verified_count++;
}
+ GPR_ASSERT(list->count == verified_count);
#endif /* NDEBUG */
}
+static void assert_valid_callouts(grpc_exec_ctx *exec_ctx,
+ grpc_metadata_batch *batch) {
+#ifndef NDEBUG
+ for (grpc_linked_mdelem *l = batch->list.head; l != NULL; l = l->next) {
+ grpc_slice key_interned = grpc_slice_intern(GRPC_MDKEY(l->md));
+ grpc_metadata_batch_callouts_index callout_idx =
+ GRPC_BATCH_INDEX_OF(key_interned);
+ if (callout_idx != GRPC_BATCH_CALLOUTS_COUNT) {
+ GPR_ASSERT(batch->idx.array[callout_idx] == l);
+ }
+ grpc_slice_unref_internal(exec_ctx, key_interned);
+ }
+#endif
+}
+
#ifndef NDEBUG
void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) {
assert_valid_list(&batch->list);
@@ -68,7 +88,7 @@ void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) {
#endif /* NDEBUG */
void grpc_metadata_batch_init(grpc_metadata_batch *batch) {
- batch->list.head = batch->list.tail = NULL;
+ memset(batch, 0, sizeof(*batch));
batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
@@ -80,17 +100,58 @@ void grpc_metadata_batch_destroy(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_metadata_batch_add_head(grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage,
- grpc_mdelem *elem_to_add) {
- GPR_ASSERT(elem_to_add);
+grpc_error *grpc_attach_md_to_error(grpc_error *src, grpc_mdelem md) {
+ char *k = grpc_slice_to_c_string(GRPC_MDKEY(md));
+ char *v = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ grpc_error *out = grpc_error_set_str(
+ grpc_error_set_str(src, GRPC_ERROR_STR_KEY, k), GRPC_ERROR_STR_VALUE, v);
+ gpr_free(k);
+ gpr_free(v);
+ return out;
+}
+
+static grpc_error *maybe_link_callout(grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage)
+ GRPC_MUST_USE_RESULT;
+
+static grpc_error *maybe_link_callout(grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage) {
+ grpc_metadata_batch_callouts_index idx =
+ GRPC_BATCH_INDEX_OF(GRPC_MDKEY(storage->md));
+ if (idx == GRPC_BATCH_CALLOUTS_COUNT) {
+ return GRPC_ERROR_NONE;
+ }
+ if (batch->idx.array[idx] == NULL) {
+ batch->idx.array[idx] = storage;
+ return GRPC_ERROR_NONE;
+ }
+ return grpc_attach_md_to_error(
+ GRPC_ERROR_CREATE("Unallowed duplicate metadata"), storage->md);
+}
+
+static void maybe_unlink_callout(grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage) {
+ grpc_metadata_batch_callouts_index idx =
+ GRPC_BATCH_INDEX_OF(GRPC_MDKEY(storage->md));
+ if (idx == GRPC_BATCH_CALLOUTS_COUNT) {
+ return;
+ }
+ GPR_ASSERT(batch->idx.array[idx] != NULL);
+ batch->idx.array[idx] = NULL;
+}
+
+grpc_error *grpc_metadata_batch_add_head(grpc_exec_ctx *exec_ctx,
+ grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage,
+ grpc_mdelem elem_to_add) {
+ GPR_ASSERT(!GRPC_MDISNULL(elem_to_add));
storage->md = elem_to_add;
- grpc_metadata_batch_link_head(batch, storage);
+ return grpc_metadata_batch_link_head(exec_ctx, batch, storage);
}
static void link_head(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
assert_valid_list(list);
- GPR_ASSERT(storage->md);
+ GPR_ASSERT(!GRPC_MDISNULL(storage->md));
storage->prev = NULL;
storage->next = list->head;
if (list->head != NULL) {
@@ -99,25 +160,36 @@ static void link_head(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
list->tail = storage;
}
list->head = storage;
+ list->count++;
assert_valid_list(list);
}
-void grpc_metadata_batch_link_head(grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage) {
+grpc_error *grpc_metadata_batch_link_head(grpc_exec_ctx *exec_ctx,
+ grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage) {
+ assert_valid_callouts(exec_ctx, batch);
+ grpc_error *err = maybe_link_callout(batch, storage);
+ if (err != GRPC_ERROR_NONE) {
+ assert_valid_callouts(exec_ctx, batch);
+ return err;
+ }
link_head(&batch->list, storage);
+ assert_valid_callouts(exec_ctx, batch);
+ return GRPC_ERROR_NONE;
}
-void grpc_metadata_batch_add_tail(grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage,
- grpc_mdelem *elem_to_add) {
- GPR_ASSERT(elem_to_add);
+grpc_error *grpc_metadata_batch_add_tail(grpc_exec_ctx *exec_ctx,
+ grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage,
+ grpc_mdelem elem_to_add) {
+ GPR_ASSERT(!GRPC_MDISNULL(elem_to_add));
storage->md = elem_to_add;
- grpc_metadata_batch_link_tail(batch, storage);
+ return grpc_metadata_batch_link_tail(exec_ctx, batch, storage);
}
static void link_tail(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
assert_valid_list(list);
- GPR_ASSERT(storage->md);
+ GPR_ASSERT(!GRPC_MDISNULL(storage->md));
storage->prev = list->tail;
storage->next = NULL;
storage->reserved = NULL;
@@ -127,70 +199,82 @@ static void link_tail(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
list->head = storage;
}
list->tail = storage;
+ list->count++;
assert_valid_list(list);
}
-void grpc_metadata_batch_link_tail(grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage) {
+grpc_error *grpc_metadata_batch_link_tail(grpc_exec_ctx *exec_ctx,
+ grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage) {
+ assert_valid_callouts(exec_ctx, batch);
+ grpc_error *err = maybe_link_callout(batch, storage);
+ if (err != GRPC_ERROR_NONE) {
+ assert_valid_callouts(exec_ctx, batch);
+ return err;
+ }
link_tail(&batch->list, storage);
+ assert_valid_callouts(exec_ctx, batch);
+ return GRPC_ERROR_NONE;
}
-void grpc_metadata_batch_move(grpc_metadata_batch *dst,
- grpc_metadata_batch *src) {
- *dst = *src;
- memset(src, 0, sizeof(grpc_metadata_batch));
+static void unlink_storage(grpc_mdelem_list *list,
+ grpc_linked_mdelem *storage) {
+ assert_valid_list(list);
+ if (storage->prev != NULL) {
+ storage->prev->next = storage->next;
+ } else {
+ list->head = storage->next;
+ }
+ if (storage->next != NULL) {
+ storage->next->prev = storage->prev;
+ } else {
+ list->tail = storage->prev;
+ }
+ list->count--;
+ assert_valid_list(list);
}
-void grpc_metadata_batch_filter(grpc_exec_ctx *exec_ctx,
+void grpc_metadata_batch_remove(grpc_exec_ctx *exec_ctx,
grpc_metadata_batch *batch,
- grpc_mdelem *(*filter)(grpc_exec_ctx *exec_ctx,
- void *user_data,
- grpc_mdelem *elem),
- void *user_data) {
- grpc_linked_mdelem *l;
- grpc_linked_mdelem *next;
-
- GPR_TIMER_BEGIN("grpc_metadata_batch_filter", 0);
-
- assert_valid_list(&batch->list);
- for (l = batch->list.head; l; l = next) {
- grpc_mdelem *orig = l->md;
- grpc_mdelem *filt = filter(exec_ctx, user_data, orig);
- next = l->next;
- if (filt == NULL) {
- if (l->prev) {
- l->prev->next = l->next;
- }
- if (l->next) {
- l->next->prev = l->prev;
- }
- if (batch->list.head == l) {
- batch->list.head = l->next;
- }
- if (batch->list.tail == l) {
- batch->list.tail = l->prev;
- }
- assert_valid_list(&batch->list);
- GRPC_MDELEM_UNREF(exec_ctx, l->md);
- } else if (filt != orig) {
- GRPC_MDELEM_UNREF(exec_ctx, orig);
- l->md = filt;
- }
- }
- assert_valid_list(&batch->list);
+ grpc_linked_mdelem *storage) {
+ assert_valid_callouts(exec_ctx, batch);
+ maybe_unlink_callout(batch, storage);
+ unlink_storage(&batch->list, storage);
+ GRPC_MDELEM_UNREF(exec_ctx, storage->md);
+ assert_valid_callouts(exec_ctx, batch);
+}
- GPR_TIMER_END("grpc_metadata_batch_filter", 0);
+void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx,
+ grpc_linked_mdelem *storage,
+ grpc_slice value) {
+ grpc_mdelem old = storage->md;
+ grpc_mdelem new = grpc_mdelem_from_slices(
+ exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(old)), value);
+ storage->md = new;
+ GRPC_MDELEM_UNREF(exec_ctx, old);
}
-static grpc_mdelem *no_metadata_for_you(grpc_exec_ctx *exec_ctx,
- void *user_data, grpc_mdelem *elem) {
- return NULL;
+grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx,
+ grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage,
+ grpc_mdelem new) {
+ grpc_error *error = GRPC_ERROR_NONE;
+ grpc_mdelem old = storage->md;
+ if (!grpc_slice_eq(GRPC_MDKEY(new), GRPC_MDKEY(old))) {
+ maybe_unlink_callout(batch, storage);
+ storage->md = new;
+ error = maybe_link_callout(batch, storage);
+ } else {
+ storage->md = new;
+ }
+ GRPC_MDELEM_UNREF(exec_ctx, old);
+ return error;
}
void grpc_metadata_batch_clear(grpc_exec_ctx *exec_ctx,
grpc_metadata_batch *batch) {
- batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
- grpc_metadata_batch_filter(exec_ctx, batch, no_metadata_for_you, NULL);
+ grpc_metadata_batch_destroy(exec_ctx, batch);
+ grpc_metadata_batch_init(batch);
}
bool grpc_metadata_batch_is_empty(grpc_metadata_batch *batch) {
@@ -207,3 +291,33 @@ size_t grpc_metadata_batch_size(grpc_metadata_batch *batch) {
}
return size;
}
+
+static void add_error(grpc_error **composite, grpc_error *error,
+ const char *composite_error_string) {
+ if (error == GRPC_ERROR_NONE) return;
+ if (*composite == GRPC_ERROR_NONE) {
+ *composite = GRPC_ERROR_CREATE(composite_error_string);
+ }
+ *composite = grpc_error_add_child(*composite, error);
+}
+
+grpc_error *grpc_metadata_batch_filter(grpc_exec_ctx *exec_ctx,
+ grpc_metadata_batch *batch,
+ grpc_metadata_batch_filter_func func,
+ void *user_data,
+ const char *composite_error_string) {
+ grpc_linked_mdelem *l = batch->list.head;
+ grpc_error *error = GRPC_ERROR_NONE;
+ while (l) {
+ grpc_linked_mdelem *next = l->next;
+ grpc_filtered_mdelem new = func(exec_ctx, user_data, l->md);
+ add_error(&error, new.error, composite_error_string);
+ if (GRPC_MDISNULL(new.md)) {
+ grpc_metadata_batch_remove(exec_ctx, batch, l);
+ } else if (new.md.payload != l->md.payload) {
+ grpc_metadata_batch_substitute(exec_ctx, batch, l, new.md);
+ }
+ l = next;
+ }
+ return error;
+}
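
A hedged sketch (not part of the patch) of a call site adapting to the now error-returning add/link entry points; GRPC_MDELEM_TE_TRAILERS is assumed to come from static_metadata.h, and the surrounding names are illustrative:

static grpc_error *example_add_te_trailers(grpc_exec_ctx *exec_ctx,
                                           grpc_metadata_batch *batch,
                                           grpc_linked_mdelem *storage) {
  /* add_tail now reports callout collisions (e.g. a duplicate "te" header)
     instead of silently linking; static elements keep ref/unref as no-ops */
  return grpc_metadata_batch_add_tail(exec_ctx, batch, storage,
                                      GRPC_MDELEM_TE_TRAILERS);
}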
diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h
index c0bd5174ab..5471539e82 100644
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
@@ -41,19 +41,21 @@
#include <grpc/support/port_platform.h>
#include <grpc/support/time.h>
#include "src/core/lib/transport/metadata.h"
+#include "src/core/lib/transport/static_metadata.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct grpc_linked_mdelem {
- grpc_mdelem *md;
+ grpc_mdelem md;
struct grpc_linked_mdelem *next;
struct grpc_linked_mdelem *prev;
void *reserved;
} grpc_linked_mdelem;
typedef struct grpc_mdelem_list {
+ size_t count;
grpc_linked_mdelem *head;
grpc_linked_mdelem *tail;
} grpc_mdelem_list;
@@ -61,6 +63,7 @@ typedef struct grpc_mdelem_list {
typedef struct grpc_metadata_batch {
/** Metadata elements in this batch */
grpc_mdelem_list list;
+ grpc_metadata_batch_callouts idx;
/** Used to calculate grpc-timeout at the point of sending,
or gpr_inf_future if this batch does not need to send a
grpc-timeout */
@@ -77,25 +80,37 @@ bool grpc_metadata_batch_is_empty(grpc_metadata_batch *batch);
/* Returns the transport size of the batch. */
size_t grpc_metadata_batch_size(grpc_metadata_batch *batch);
-/** Moves the metadata information from \a src to \a dst. Upon return, \a src is
- * zeroed. */
-void grpc_metadata_batch_move(grpc_metadata_batch *dst,
- grpc_metadata_batch *src);
+/** Remove \a storage from the batch, unreffing the mdelem contained */
+void grpc_metadata_batch_remove(grpc_exec_ctx *exec_ctx,
+ grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage);
+
+/** Substitute a new mdelem for an old value */
+grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx,
+ grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage,
+ grpc_mdelem new_value);
+
+void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx,
+ grpc_linked_mdelem *storage,
+ grpc_slice value);
/** Add \a storage to the beginning of \a batch. storage->md is
assumed to be valid.
\a storage is owned by the caller and must survive for the
lifetime of batch. This usually means it should be around
for the lifetime of the call. */
-void grpc_metadata_batch_link_head(grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage);
+grpc_error *grpc_metadata_batch_link_head(
+ grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage) GRPC_MUST_USE_RESULT;
/** Add \a storage to the end of \a batch. storage->md is
assumed to be valid.
\a storage is owned by the caller and must survive for the
lifetime of batch. This usually means it should be around
for the lifetime of the call. */
-void grpc_metadata_batch_link_tail(grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage);
+grpc_error *grpc_metadata_batch_link_tail(
+ grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage) GRPC_MUST_USE_RESULT;
/** Add \a elem_to_add as the first element in \a batch, using
\a storage as backing storage for the linked list element.
@@ -103,29 +118,38 @@ void grpc_metadata_batch_link_tail(grpc_metadata_batch *batch,
lifetime of batch. This usually means it should be around
for the lifetime of the call.
Takes ownership of \a elem_to_add */
-void grpc_metadata_batch_add_head(grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage,
- grpc_mdelem *elem_to_add);
+grpc_error *grpc_metadata_batch_add_head(
+ grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage, grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT;
/** Add \a elem_to_add as the last element in \a batch, using
\a storage as backing storage for the linked list element.
\a storage is owned by the caller and must survive for the
lifetime of batch. This usually means it should be around
for the lifetime of the call.
Takes ownership of \a elem_to_add */
-void grpc_metadata_batch_add_tail(grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage,
- grpc_mdelem *elem_to_add);
-
-/** For each element in \a batch, execute \a filter.
- The return value from \a filter will be substituted for the
- grpc_mdelem passed to \a filter. If \a filter returns NULL,
- the element will be moved to the garbage list. */
-void grpc_metadata_batch_filter(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch,
- grpc_mdelem *(*filter)(grpc_exec_ctx *exec_ctx,
- void *user_data,
- grpc_mdelem *elem),
- void *user_data);
+grpc_error *grpc_metadata_batch_add_tail(
+ grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch,
+ grpc_linked_mdelem *storage, grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT;
+
+grpc_error *grpc_attach_md_to_error(grpc_error *src, grpc_mdelem md);
+
+typedef struct {
+ grpc_error *error;
+ grpc_mdelem md;
+} grpc_filtered_mdelem;
+
+#define GRPC_FILTERED_ERROR(error) \
+ ((grpc_filtered_mdelem){(error), GRPC_MDNULL})
+#define GRPC_FILTERED_MDELEM(md) ((grpc_filtered_mdelem){GRPC_ERROR_NONE, (md)})
+#define GRPC_FILTERED_REMOVE() \
+ ((grpc_filtered_mdelem){GRPC_ERROR_NONE, GRPC_MDNULL})
+
+typedef grpc_filtered_mdelem (*grpc_metadata_batch_filter_func)(
+ grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem elem);
+grpc_error *grpc_metadata_batch_filter(
+ grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch,
+ grpc_metadata_batch_filter_func func, void *user_data,
+ const char *composite_error_string) GRPC_MUST_USE_RESULT;
#ifndef NDEBUG
void grpc_metadata_batch_assert_ok(grpc_metadata_batch *comd);
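
A sketch (not part of the patch) of a filter callback for the grpc_metadata_batch_filter API declared above; the "x-remove-me" header name is made up. Returning GRPC_FILTERED_REMOVE() unlinks and unrefs the element, returning the element unchanged keeps it in place, and returning a different mdelem substitutes it (the batch takes over the new reference).

static grpc_filtered_mdelem strip_example_header(grpc_exec_ctx *exec_ctx,
                                                 void *user_data,
                                                 grpc_mdelem elem) {
  if (grpc_slice_eq(GRPC_MDKEY(elem),
                    grpc_slice_from_static_string("x-remove-me"))) {
    return GRPC_FILTERED_REMOVE(); /* drop and unref this element */
  }
  return GRPC_FILTERED_MDELEM(elem); /* same payload: batch left untouched */
}

/* typical call shape:
   grpc_error *err = grpc_metadata_batch_filter(
       exec_ctx, batch, strip_example_header, NULL, "example filter"); */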
diff --git a/src/core/lib/transport/method_config.c b/src/core/lib/transport/method_config.c
deleted file mode 100644
index 25fb54b37d..0000000000
--- a/src/core/lib/transport/method_config.c
+++ /dev/null
@@ -1,347 +0,0 @@
-//
-// Copyright 2015, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#include "src/core/lib/transport/method_config.h"
-
-#include <string.h>
-
-#include <grpc/impl/codegen/grpc_types.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/time.h>
-
-#include "src/core/lib/transport/mdstr_hash_table.h"
-#include "src/core/lib/transport/metadata.h"
-
-//
-// grpc_method_config
-//
-
-// bool vtable
-
-static void* bool_copy(void* valuep) {
- bool value = *(bool*)valuep;
- bool* new_value = gpr_malloc(sizeof(bool));
- *new_value = value;
- return new_value;
-}
-
-static int bool_cmp(void* v1, void* v2) {
- bool b1 = *(bool*)v1;
- bool b2 = *(bool*)v2;
- if (!b1 && b2) return -1;
- if (b1 && !b2) return 1;
- return 0;
-}
-
-static void free_mem(grpc_exec_ctx* exec_ctx, void* p) { gpr_free(p); }
-
-static grpc_mdstr_hash_table_vtable bool_vtable = {free_mem, bool_copy,
- bool_cmp};
-
-// timespec vtable
-
-static void* timespec_copy(void* valuep) {
- gpr_timespec value = *(gpr_timespec*)valuep;
- gpr_timespec* new_value = gpr_malloc(sizeof(gpr_timespec));
- *new_value = value;
- return new_value;
-}
-
-static int timespec_cmp(void* v1, void* v2) {
- return gpr_time_cmp(*(gpr_timespec*)v1, *(gpr_timespec*)v2);
-}
-
-static grpc_mdstr_hash_table_vtable timespec_vtable = {free_mem, timespec_copy,
- timespec_cmp};
-
-// int32 vtable
-
-static void* int32_copy(void* valuep) {
- int32_t value = *(int32_t*)valuep;
- int32_t* new_value = gpr_malloc(sizeof(int32_t));
- *new_value = value;
- return new_value;
-}
-
-static int int32_cmp(void* v1, void* v2) {
- int32_t i1 = *(int32_t*)v1;
- int32_t i2 = *(int32_t*)v2;
- if (i1 < i2) return -1;
- if (i1 > i2) return 1;
- return 0;
-}
-
-static grpc_mdstr_hash_table_vtable int32_vtable = {free_mem, int32_copy,
- int32_cmp};
-
-// Hash table keys.
-#define GRPC_METHOD_CONFIG_WAIT_FOR_READY "grpc.wait_for_ready" // bool
-#define GRPC_METHOD_CONFIG_TIMEOUT "grpc.timeout" // gpr_timespec
-#define GRPC_METHOD_CONFIG_MAX_REQUEST_MESSAGE_BYTES \
- "grpc.max_request_message_bytes" // int32
-#define GRPC_METHOD_CONFIG_MAX_RESPONSE_MESSAGE_BYTES \
- "grpc.max_response_message_bytes" // int32
-
-struct grpc_method_config {
- grpc_mdstr_hash_table* table;
- grpc_mdstr* wait_for_ready_key;
- grpc_mdstr* timeout_key;
- grpc_mdstr* max_request_message_bytes_key;
- grpc_mdstr* max_response_message_bytes_key;
-};
-
-grpc_method_config* grpc_method_config_create(
- bool* wait_for_ready, gpr_timespec* timeout,
- int32_t* max_request_message_bytes, int32_t* max_response_message_bytes) {
- grpc_method_config* method_config = gpr_malloc(sizeof(grpc_method_config));
- memset(method_config, 0, sizeof(grpc_method_config));
- method_config->wait_for_ready_key =
- grpc_mdstr_from_string(GRPC_METHOD_CONFIG_WAIT_FOR_READY);
- method_config->timeout_key =
- grpc_mdstr_from_string(GRPC_METHOD_CONFIG_TIMEOUT);
- method_config->max_request_message_bytes_key =
- grpc_mdstr_from_string(GRPC_METHOD_CONFIG_MAX_REQUEST_MESSAGE_BYTES);
- method_config->max_response_message_bytes_key =
- grpc_mdstr_from_string(GRPC_METHOD_CONFIG_MAX_RESPONSE_MESSAGE_BYTES);
- grpc_mdstr_hash_table_entry entries[4];
- size_t num_entries = 0;
- if (wait_for_ready != NULL) {
- entries[num_entries].key = method_config->wait_for_ready_key;
- entries[num_entries].value = wait_for_ready;
- entries[num_entries].vtable = &bool_vtable;
- ++num_entries;
- }
- if (timeout != NULL) {
- entries[num_entries].key = method_config->timeout_key;
- entries[num_entries].value = timeout;
- entries[num_entries].vtable = &timespec_vtable;
- ++num_entries;
- }
- if (max_request_message_bytes != NULL) {
- entries[num_entries].key = method_config->max_request_message_bytes_key;
- entries[num_entries].value = max_request_message_bytes;
- entries[num_entries].vtable = &int32_vtable;
- ++num_entries;
- }
- if (max_response_message_bytes != NULL) {
- entries[num_entries].key = method_config->max_response_message_bytes_key;
- entries[num_entries].value = max_response_message_bytes;
- entries[num_entries].vtable = &int32_vtable;
- ++num_entries;
- }
- method_config->table = grpc_mdstr_hash_table_create(num_entries, entries);
- return method_config;
-}
-
-grpc_method_config* grpc_method_config_ref(grpc_method_config* method_config) {
- grpc_mdstr_hash_table_ref(method_config->table);
- return method_config;
-}
-
-void grpc_method_config_unref(grpc_exec_ctx* exec_ctx,
- grpc_method_config* method_config) {
- if (grpc_mdstr_hash_table_unref(exec_ctx, method_config->table)) {
- GRPC_MDSTR_UNREF(exec_ctx, method_config->wait_for_ready_key);
- GRPC_MDSTR_UNREF(exec_ctx, method_config->timeout_key);
- GRPC_MDSTR_UNREF(exec_ctx, method_config->max_request_message_bytes_key);
- GRPC_MDSTR_UNREF(exec_ctx, method_config->max_response_message_bytes_key);
- gpr_free(method_config);
- }
-}
-
-int grpc_method_config_cmp(const grpc_method_config* method_config1,
- const grpc_method_config* method_config2) {
- return grpc_mdstr_hash_table_cmp(method_config1->table,
- method_config2->table);
-}
-
-const bool* grpc_method_config_get_wait_for_ready(
- const grpc_method_config* method_config) {
- return grpc_mdstr_hash_table_get(method_config->table,
- method_config->wait_for_ready_key);
-}
-
-const gpr_timespec* grpc_method_config_get_timeout(
- const grpc_method_config* method_config) {
- return grpc_mdstr_hash_table_get(method_config->table,
- method_config->timeout_key);
-}
-
-const int32_t* grpc_method_config_get_max_request_message_bytes(
- const grpc_method_config* method_config) {
- return grpc_mdstr_hash_table_get(
- method_config->table, method_config->max_request_message_bytes_key);
-}
-
-const int32_t* grpc_method_config_get_max_response_message_bytes(
- const grpc_method_config* method_config) {
- return grpc_mdstr_hash_table_get(
- method_config->table, method_config->max_response_message_bytes_key);
-}
-
-//
-// grpc_method_config_table
-//
-
-static void method_config_unref(grpc_exec_ctx* exec_ctx, void* valuep) {
- grpc_method_config_unref(exec_ctx, valuep);
-}
-
-static void* method_config_ref(void* valuep) {
- return grpc_method_config_ref(valuep);
-}
-
-static int method_config_cmp(void* valuep1, void* valuep2) {
- return grpc_method_config_cmp(valuep1, valuep2);
-}
-
-static const grpc_mdstr_hash_table_vtable method_config_table_vtable = {
- method_config_unref, method_config_ref, method_config_cmp};
-
-grpc_method_config_table* grpc_method_config_table_create(
- size_t num_entries, grpc_method_config_table_entry* entries) {
- grpc_mdstr_hash_table_entry* hash_table_entries =
- gpr_malloc(sizeof(grpc_mdstr_hash_table_entry) * num_entries);
- for (size_t i = 0; i < num_entries; ++i) {
- hash_table_entries[i].key = entries[i].method_name;
- hash_table_entries[i].value = entries[i].method_config;
- hash_table_entries[i].vtable = &method_config_table_vtable;
- }
- grpc_method_config_table* method_config_table =
- grpc_mdstr_hash_table_create(num_entries, hash_table_entries);
- gpr_free(hash_table_entries);
- return method_config_table;
-}
-
-grpc_method_config_table* grpc_method_config_table_ref(
- grpc_method_config_table* table) {
- return grpc_mdstr_hash_table_ref(table);
-}
-
-void grpc_method_config_table_unref(grpc_exec_ctx* exec_ctx,
- grpc_method_config_table* table) {
- grpc_mdstr_hash_table_unref(exec_ctx, table);
-}
-
-int grpc_method_config_table_cmp(const grpc_method_config_table* table1,
- const grpc_method_config_table* table2) {
- return grpc_mdstr_hash_table_cmp(table1, table2);
-}
-
-void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx,
- const grpc_mdstr_hash_table* table,
- const grpc_mdstr* path) {
- void* value = grpc_mdstr_hash_table_get(table, path);
- // If we didn't find a match for the path, try looking for a wildcard
- // entry (i.e., change "/service/method" to "/service/*").
- if (value == NULL) {
- const char* path_str = grpc_mdstr_as_c_string(path);
- const char* sep = strrchr(path_str, '/') + 1;
- const size_t len = (size_t)(sep - path_str);
- char* buf = gpr_malloc(len + 2); // '*' and NUL
- memcpy(buf, path_str, len);
- buf[len] = '*';
- buf[len + 1] = '\0';
- grpc_mdstr* wildcard_path = grpc_mdstr_from_string(buf);
- gpr_free(buf);
- value = grpc_mdstr_hash_table_get(table, wildcard_path);
- GRPC_MDSTR_UNREF(exec_ctx, wildcard_path);
- }
- return value;
-}
-
-static void* copy_arg(void* p) { return grpc_method_config_table_ref(p); }
-
-static void destroy_arg(grpc_exec_ctx* exec_ctx, void* p) {
- grpc_method_config_table_unref(exec_ctx, p);
-}
-
-static int cmp_arg(void* p1, void* p2) {
- return grpc_method_config_table_cmp(p1, p2);
-}
-
-static grpc_arg_pointer_vtable arg_vtable = {copy_arg, destroy_arg, cmp_arg};
-
-grpc_arg grpc_method_config_table_create_channel_arg(
- grpc_method_config_table* table) {
- grpc_arg arg;
- arg.type = GRPC_ARG_POINTER;
- arg.key = GRPC_ARG_SERVICE_CONFIG;
- arg.value.pointer.p = table;
- arg.value.pointer.vtable = &arg_vtable;
- return arg;
-}
-
-// State used by convert_entry() below.
-typedef struct conversion_state {
- void* (*convert_value)(const grpc_method_config* method_config);
- const grpc_mdstr_hash_table_vtable* vtable;
- size_t num_entries;
- grpc_mdstr_hash_table_entry* entries;
-} conversion_state;
-
-// A function to be passed to grpc_mdstr_hash_table_iterate() to create
-// a copy of the entries.
-static void convert_entry(const grpc_mdstr_hash_table_entry* entry,
- void* user_data) {
- conversion_state* state = user_data;
- state->entries[state->num_entries].key = GRPC_MDSTR_REF(entry->key);
- state->entries[state->num_entries].value = state->convert_value(entry->value);
- state->entries[state->num_entries].vtable = state->vtable;
- ++state->num_entries;
-}
-
-grpc_mdstr_hash_table* grpc_method_config_table_convert(
- grpc_exec_ctx* exec_ctx, const grpc_method_config_table* table,
- void* (*convert_value)(const grpc_method_config* method_config),
- const grpc_mdstr_hash_table_vtable* vtable) {
- // Create an array of the entries in the table with converted values.
- conversion_state state;
- state.convert_value = convert_value;
- state.vtable = vtable;
- state.num_entries = 0;
- state.entries = gpr_malloc(sizeof(grpc_mdstr_hash_table_entry) *
- grpc_mdstr_hash_table_num_entries(table));
- grpc_mdstr_hash_table_iterate(table, convert_entry, &state);
- // Create a new table based on the array we just constructed.
- grpc_mdstr_hash_table* new_table =
- grpc_mdstr_hash_table_create(state.num_entries, state.entries);
- // Clean up the array.
- for (size_t i = 0; i < state.num_entries; ++i) {
- GRPC_MDSTR_UNREF(exec_ctx, state.entries[i].key);
- vtable->destroy_value(exec_ctx, state.entries[i].value);
- }
- gpr_free(state.entries);
- // Return the new table.
- return new_table;
-}
diff --git a/src/core/lib/transport/method_config.h b/src/core/lib/transport/method_config.h
deleted file mode 100644
index d17a493fd4..0000000000
--- a/src/core/lib/transport/method_config.h
+++ /dev/null
@@ -1,139 +0,0 @@
-//
-// Copyright 2016, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#ifndef GRPC_CORE_LIB_TRANSPORT_METHOD_CONFIG_H
-#define GRPC_CORE_LIB_TRANSPORT_METHOD_CONFIG_H
-
-#include <stdbool.h>
-
-#include <grpc/impl/codegen/gpr_types.h>
-#include <grpc/impl/codegen/grpc_types.h>
-
-#include "src/core/lib/transport/mdstr_hash_table.h"
-#include "src/core/lib/transport/metadata.h"
-
-/// Per-method configuration.
-typedef struct grpc_method_config grpc_method_config;
-
-/// Creates a grpc_method_config with the specified parameters.
-/// Any parameter may be NULL to indicate that the value is unset.
-///
-/// \a wait_for_ready indicates whether the client should wait until the
-/// request deadline for the channel to become ready, even if there is a
-/// temporary failure before the deadline while attempting to connect.
-///
-/// \a timeout indicates the timeout for calls.
-///
-/// \a max_request_message_bytes and \a max_response_message_bytes
-/// indicate the maximum sizes of the request (checked when sending) and
-/// response (checked when receiving) messages.
-grpc_method_config* grpc_method_config_create(
- bool* wait_for_ready, gpr_timespec* timeout,
- int32_t* max_request_message_bytes, int32_t* max_response_message_bytes);
-
-grpc_method_config* grpc_method_config_ref(grpc_method_config* method_config);
-void grpc_method_config_unref(grpc_exec_ctx* exec_ctx,
- grpc_method_config* method_config);
-
-/// Compares two grpc_method_configs.
-/// The sort order is stable but undefined.
-int grpc_method_config_cmp(const grpc_method_config* method_config1,
- const grpc_method_config* method_config2);
-
-/// These methods return NULL if the requested field is unset.
-/// The caller does NOT take ownership of the result.
-const bool* grpc_method_config_get_wait_for_ready(
- const grpc_method_config* method_config);
-const gpr_timespec* grpc_method_config_get_timeout(
- const grpc_method_config* method_config);
-const int32_t* grpc_method_config_get_max_request_message_bytes(
- const grpc_method_config* method_config);
-const int32_t* grpc_method_config_get_max_response_message_bytes(
- const grpc_method_config* method_config);
-
-/// A table of method configs.
-typedef grpc_mdstr_hash_table grpc_method_config_table;
-
-typedef struct grpc_method_config_table_entry {
- /// The name is of one of the following forms:
- /// service/method -- specifies exact service and method name
- /// service/* -- matches all methods for the specified service
- grpc_mdstr* method_name;
- grpc_method_config* method_config;
-} grpc_method_config_table_entry;
-
-/// Takes new references to all keys and values in \a entries.
-grpc_method_config_table* grpc_method_config_table_create(
- size_t num_entries, grpc_method_config_table_entry* entries);
-
-grpc_method_config_table* grpc_method_config_table_ref(
- grpc_method_config_table* table);
-void grpc_method_config_table_unref(grpc_exec_ctx* exec_ctx,
- grpc_method_config_table* table);
-
-/// Compares two grpc_method_config_tables.
-/// The sort order is stable but undefined.
-int grpc_method_config_table_cmp(const grpc_method_config_table* table1,
- const grpc_method_config_table* table2);
-
-/// Gets the method config for the specified \a path, which should be of
-/// the form "/service/method".
-/// Returns NULL if the method has no config.
-/// Caller does NOT own a reference to the result.
-///
-/// Note: This returns a void* instead of a grpc_method_config* so that
-/// it can also be used for tables constructed via
-/// grpc_method_config_table_convert().
-void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx,
- const grpc_mdstr_hash_table* table,
- const grpc_mdstr* path);
-
-/// Returns a channel arg containing \a table.
-grpc_arg grpc_method_config_table_create_channel_arg(
- grpc_method_config_table* table);
-
-/// Generates a new table from \a table whose values are converted to a
-/// new form via the \a convert_value function. The new table will use
-/// \a vtable for its values.
-///
-/// This is generally used to convert the table's value type from
-/// grpc_method_config to a simple struct containing only the parameters
-/// relevant to a particular filter, thus avoiding the need for a hash
-/// table lookup on the fast path. In that scenario, \a convert_value
-/// will return a new instance of the struct containing the values from
-/// the grpc_method_config, and \a vtable provides the methods for
-/// operating on the struct type.
-grpc_mdstr_hash_table* grpc_method_config_table_convert(
- grpc_exec_ctx* exec_ctx, const grpc_method_config_table* table,
- void* (*convert_value)(const grpc_method_config* method_config),
- const grpc_mdstr_hash_table_vtable* vtable);
-
-#endif /* GRPC_CORE_LIB_TRANSPORT_METHOD_CONFIG_H */
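[Editorial note] The header removed above documented per-method settings (wait_for_ready, timeout, max request/response message bytes) where every field is optional and the getters "return NULL if the requested field is unset." As a standalone, hedged sketch of that optional-field contract (plain C with hypothetical names, not the gRPC API), the following illustrates NULL-means-unset getters:

/* Standalone sketch (hypothetical names, not the gRPC API): per-method
 * settings where every field is optional; a NULL pointer means "unset". */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  bool *wait_for_ready;               /* NULL => unset */
  int32_t *max_request_message_bytes; /* NULL => unset */
} toy_method_config;

/* Returns NULL when the field was never set; caller does not own the result. */
static const int32_t *toy_get_max_request_bytes(const toy_method_config *c) {
  return c->max_request_message_bytes;
}

int main(void) {
  int32_t limit = 4 * 1024 * 1024;
  toy_method_config cfg = {NULL, &limit};
  const int32_t *max = toy_get_max_request_bytes(&cfg);
  if (max != NULL) printf("max request bytes: %d\n", *max);
  if (cfg.wait_for_ready == NULL) printf("wait_for_ready: unset\n");
  return 0;
}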
diff --git a/src/core/lib/transport/service_config.c b/src/core/lib/transport/service_config.c
index 552d3ec856..12da2a88fe 100644
--- a/src/core/lib/transport/service_config.c
+++ b/src/core/lib/transport/service_config.c
@@ -39,8 +39,10 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/json/json.h"
+#include "src/core/lib/slice/slice_hash_table.h"
+#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
-#include "src/core/lib/transport/mdstr_hash_table.h"
// The main purpose of the code here is to parse the service config in
// JSON form, which will look like this:
@@ -148,8 +150,8 @@ static char* parse_json_method_name(grpc_json* json) {
static bool parse_json_method_config(
grpc_exec_ctx* exec_ctx, grpc_json* json,
void* (*create_value)(const grpc_json* method_config_json),
- const grpc_mdstr_hash_table_vtable* vtable,
- grpc_mdstr_hash_table_entry* entries, size_t* idx) {
+ const grpc_slice_hash_table_vtable* vtable,
+ grpc_slice_hash_table_entry* entries, size_t* idx) {
// Construct value.
void* method_config = create_value(json);
if (method_config == NULL) return false;
@@ -170,7 +172,7 @@ static bool parse_json_method_config(
if (paths.count == 0) goto done; // No names specified.
// Add entry for each path.
for (size_t i = 0; i < paths.count; ++i) {
- entries[*idx].key = grpc_mdstr_from_string(paths.strs[i]);
+ entries[*idx].key = grpc_slice_from_copied_string(paths.strs[i]);
entries[*idx].value = vtable->copy_value(method_config);
entries[*idx].vtable = vtable;
++*idx;
@@ -182,15 +184,15 @@ done:
return success;
}
-grpc_mdstr_hash_table* grpc_service_config_create_method_config_table(
+grpc_slice_hash_table* grpc_service_config_create_method_config_table(
grpc_exec_ctx* exec_ctx, const grpc_service_config* service_config,
void* (*create_value)(const grpc_json* method_config_json),
- const grpc_mdstr_hash_table_vtable* vtable) {
+ const grpc_slice_hash_table_vtable* vtable) {
const grpc_json* json = service_config->json_tree;
// Traverse parsed JSON tree.
if (json->type != GRPC_JSON_OBJECT || json->key != NULL) return NULL;
size_t num_entries = 0;
- grpc_mdstr_hash_table_entry* entries = NULL;
+ grpc_slice_hash_table_entry* entries = NULL;
for (grpc_json* field = json->child; field != NULL; field = field->next) {
if (field->key == NULL) return NULL;
if (strcmp(field->key, "methodConfig") == 0) {
@@ -202,7 +204,7 @@ grpc_mdstr_hash_table* grpc_service_config_create_method_config_table(
num_entries += count_names_in_method_config_json(method);
}
// Populate method config table entries.
- entries = gpr_malloc(num_entries * sizeof(grpc_mdstr_hash_table_entry));
+ entries = gpr_malloc(num_entries * sizeof(grpc_slice_hash_table_entry));
size_t idx = 0;
for (grpc_json* method = field->child; method != NULL;
method = method->next) {
@@ -215,12 +217,12 @@ grpc_mdstr_hash_table* grpc_service_config_create_method_config_table(
}
}
// Instantiate method config table.
- grpc_mdstr_hash_table* method_config_table = NULL;
+ grpc_slice_hash_table* method_config_table = NULL;
if (entries != NULL) {
- method_config_table = grpc_mdstr_hash_table_create(num_entries, entries);
+ method_config_table = grpc_slice_hash_table_create(num_entries, entries);
// Clean up.
for (size_t i = 0; i < num_entries; ++i) {
- GRPC_MDSTR_UNREF(exec_ctx, entries[i].key);
+ grpc_slice_unref_internal(exec_ctx, entries[i].key);
vtable->destroy_value(exec_ctx, entries[i].value);
}
gpr_free(entries);
@@ -229,23 +231,24 @@ grpc_mdstr_hash_table* grpc_service_config_create_method_config_table(
}
void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx,
- const grpc_mdstr_hash_table* table,
- const grpc_mdstr* path) {
- void* value = grpc_mdstr_hash_table_get(table, path);
+ const grpc_slice_hash_table* table,
+ grpc_slice path) {
+ void* value = grpc_slice_hash_table_get(table, path);
// If we didn't find a match for the path, try looking for a wildcard
// entry (i.e., change "/service/method" to "/service/*").
if (value == NULL) {
- const char* path_str = grpc_mdstr_as_c_string(path);
+ char* path_str = grpc_slice_to_c_string(path);
const char* sep = strrchr(path_str, '/') + 1;
const size_t len = (size_t)(sep - path_str);
char* buf = gpr_malloc(len + 2); // '*' and NUL
memcpy(buf, path_str, len);
buf[len] = '*';
buf[len + 1] = '\0';
- grpc_mdstr* wildcard_path = grpc_mdstr_from_string(buf);
+ grpc_slice wildcard_path = grpc_slice_from_copied_string(buf);
gpr_free(buf);
- value = grpc_mdstr_hash_table_get(table, wildcard_path);
- GRPC_MDSTR_UNREF(exec_ctx, wildcard_path);
+ value = grpc_slice_hash_table_get(table, wildcard_path);
+ grpc_slice_unref_internal(exec_ctx, wildcard_path);
+ gpr_free(path_str);
}
return value;
}
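[Editorial note] The lookup above falls back from an exact "/service/method" key to the wildcard "/service/*" key when no exact entry exists. A minimal standalone sketch of that string transformation (plain C, hypothetical helper name; it assumes the path contains at least one '/'):

/* Returns a newly allocated wildcard form of `path`; caller frees.
 * "/myservice/MyMethod" -> "/myservice/*". Assumes `path` contains '/'. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *wildcard_path(const char *path) {
  const char *sep = strrchr(path, '/') + 1;  /* keep the trailing '/' */
  size_t len = (size_t)(sep - path);
  char *buf = malloc(len + 2);               /* room for '*' and NUL */
  memcpy(buf, path, len);
  buf[len] = '*';
  buf[len + 1] = '\0';
  return buf;
}

int main(void) {
  char *w = wildcard_path("/myservice/MyMethod");
  printf("%s\n", w); /* prints "/myservice/*" */
  free(w);
  return 0;
}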
diff --git a/src/core/lib/transport/service_config.h b/src/core/lib/transport/service_config.h
index f0897170fa..cd739a593c 100644
--- a/src/core/lib/transport/service_config.h
+++ b/src/core/lib/transport/service_config.h
@@ -35,7 +35,7 @@
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/lib/json/json.h"
-#include "src/core/lib/transport/mdstr_hash_table.h"
+#include "src/core/lib/slice/slice_hash_table.h"
typedef struct grpc_service_config grpc_service_config;
@@ -53,10 +53,10 @@ const char* grpc_service_config_get_lb_policy_name(
/// returned by \a create_value(), based on data parsed from the JSON tree.
/// \a vtable provides methods used to manage the values.
/// Returns NULL on error.
-grpc_mdstr_hash_table* grpc_service_config_create_method_config_table(
+grpc_slice_hash_table* grpc_service_config_create_method_config_table(
grpc_exec_ctx* exec_ctx, const grpc_service_config* service_config,
void* (*create_value)(const grpc_json* method_config_json),
- const grpc_mdstr_hash_table_vtable* vtable);
+ const grpc_slice_hash_table_vtable* vtable);
/// A helper function for looking up values in the table returned by
/// \a grpc_service_config_create_method_config_table().
@@ -65,7 +65,7 @@ grpc_mdstr_hash_table* grpc_service_config_create_method_config_table(
/// Returns NULL if the method has no config.
/// Caller does NOT own a reference to the result.
void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx,
- const grpc_mdstr_hash_table* table,
- const grpc_mdstr* path);
+ const grpc_slice_hash_table* table,
+ grpc_slice path);
#endif /* GRPC_CORE_LIB_TRANSPORT_SERVICE_CONFIG_H */
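[Editorial note] The table built by grpc_service_config_create_method_config_table stores opaque values managed through a vtable: the parser calls vtable->copy_value() when inserting an entry and vtable->destroy_value() when cleaning up. A small standalone sketch of that copy/destroy vtable pattern (hypothetical names, string values standing in for parsed method configs):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  void *(*copy_value)(void *value);
  void (*destroy_value)(void *value);
} toy_value_vtable;

typedef struct {
  const char *key;
  void *value;
  const toy_value_vtable *vtable;
} toy_entry;

static void *copy_string(void *v) { return strdup((const char *)v); }
static void destroy_string(void *v) { free(v); }
static const toy_value_vtable string_vtable = {copy_string, destroy_string};

int main(void) {
  char original[] = "timeout: 5s";
  /* The table keeps its own copy of the value... */
  toy_entry e = {"/myservice/MyMethod", string_vtable.copy_value(original),
                 &string_vtable};
  printf("%s -> %s\n", e.key, (const char *)e.value);
  /* ...and frees it through the same vtable when the entry is destroyed. */
  e.vtable->destroy_value(e.value);
  return 0;
}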
diff --git a/src/core/lib/transport/static_metadata.c b/src/core/lib/transport/static_metadata.c
index 8b22592b45..5adc3216c9 100644
--- a/src/core/lib/transport/static_metadata.c
+++ b/src/core/lib/transport/static_metadata.c
@@ -41,120 +41,770 @@
#include "src/core/lib/transport/static_metadata.h"
-grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
+#include "src/core/lib/slice/slice_internal.h"
+
+static uint8_t g_bytes[] = {
+ 58, 112, 97, 116, 104, 58, 109, 101, 116, 104, 111, 100, 58, 115, 116,
+ 97, 116, 117, 115, 58, 97, 117, 116, 104, 111, 114, 105, 116, 121, 58,
+ 115, 99, 104, 101, 109, 101, 116, 101, 103, 114, 112, 99, 45, 109, 101,
+ 115, 115, 97, 103, 101, 103, 114, 112, 99, 45, 115, 116, 97, 116, 117,
+ 115, 103, 114, 112, 99, 45, 112, 97, 121, 108, 111, 97, 100, 45, 98,
+ 105, 110, 103, 114, 112, 99, 45, 101, 110, 99, 111, 100, 105, 110, 103,
+ 103, 114, 112, 99, 45, 97, 99, 99, 101, 112, 116, 45, 101, 110, 99,
+ 111, 100, 105, 110, 103, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121,
+ 112, 101, 103, 114, 112, 99, 45, 105, 110, 116, 101, 114, 110, 97, 108,
+ 45, 101, 110, 99, 111, 100, 105, 110, 103, 45, 114, 101, 113, 117, 101,
+ 115, 116, 117, 115, 101, 114, 45, 97, 103, 101, 110, 116, 104, 111, 115,
+ 116, 108, 98, 45, 116, 111, 107, 101, 110, 108, 98, 45, 99, 111, 115,
+ 116, 45, 98, 105, 110, 103, 114, 112, 99, 45, 116, 105, 109, 101, 111,
+ 117, 116, 103, 114, 112, 99, 45, 116, 114, 97, 99, 105, 110, 103, 45,
+ 98, 105, 110, 103, 114, 112, 99, 45, 115, 116, 97, 116, 115, 45, 98,
+ 105, 110, 103, 114, 112, 99, 46, 119, 97, 105, 116, 95, 102, 111, 114,
+ 95, 114, 101, 97, 100, 121, 103, 114, 112, 99, 46, 116, 105, 109, 101,
+ 111, 117, 116, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 113,
+ 117, 101, 115, 116, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98, 121,
+ 116, 101, 115, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 115,
+ 112, 111, 110, 115, 101, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98,
+ 121, 116, 101, 115, 47, 103, 114, 112, 99, 46, 108, 98, 46, 118, 49,
+ 46, 76, 111, 97, 100, 66, 97, 108, 97, 110, 99, 101, 114, 47, 66,
+ 97, 108, 97, 110, 99, 101, 76, 111, 97, 100, 48, 49, 50, 105, 100,
+ 101, 110, 116, 105, 116, 121, 103, 122, 105, 112, 100, 101, 102, 108, 97,
+ 116, 101, 116, 114, 97, 105, 108, 101, 114, 115, 97, 112, 112, 108, 105,
+ 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99, 80, 79, 83, 84,
+ 50, 48, 48, 52, 48, 52, 104, 116, 116, 112, 104, 116, 116, 112, 115,
+ 103, 114, 112, 99, 71, 69, 84, 80, 85, 84, 47, 47, 105, 110, 100,
+ 101, 120, 46, 104, 116, 109, 108, 50, 48, 52, 50, 48, 54, 51, 48,
+ 52, 52, 48, 48, 53, 48, 48, 97, 99, 99, 101, 112, 116, 45, 99,
+ 104, 97, 114, 115, 101, 116, 97, 99, 99, 101, 112, 116, 45, 101, 110,
+ 99, 111, 100, 105, 110, 103, 103, 122, 105, 112, 44, 32, 100, 101, 102,
+ 108, 97, 116, 101, 97, 99, 99, 101, 112, 116, 45, 108, 97, 110, 103,
+ 117, 97, 103, 101, 97, 99, 99, 101, 112, 116, 45, 114, 97, 110, 103,
+ 101, 115, 97, 99, 99, 101, 112, 116, 97, 99, 99, 101, 115, 115, 45,
+ 99, 111, 110, 116, 114, 111, 108, 45, 97, 108, 108, 111, 119, 45, 111,
+ 114, 105, 103, 105, 110, 97, 103, 101, 97, 108, 108, 111, 119, 97, 117,
+ 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 99, 97, 99, 104,
+ 101, 45, 99, 111, 110, 116, 114, 111, 108, 99, 111, 110, 116, 101, 110,
+ 116, 45, 100, 105, 115, 112, 111, 115, 105, 116, 105, 111, 110, 99, 111,
+ 110, 116, 101, 110, 116, 45, 101, 110, 99, 111, 100, 105, 110, 103, 99,
+ 111, 110, 116, 101, 110, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101,
+ 99, 111, 110, 116, 101, 110, 116, 45, 108, 101, 110, 103, 116, 104, 99,
+ 111, 110, 116, 101, 110, 116, 45, 108, 111, 99, 97, 116, 105, 111, 110,
+ 99, 111, 110, 116, 101, 110, 116, 45, 114, 97, 110, 103, 101, 99, 111,
+ 111, 107, 105, 101, 100, 97, 116, 101, 101, 116, 97, 103, 101, 120, 112,
+ 101, 99, 116, 101, 120, 112, 105, 114, 101, 115, 102, 114, 111, 109, 105,
+ 102, 45, 109, 97, 116, 99, 104, 105, 102, 45, 109, 111, 100, 105, 102,
+ 105, 101, 100, 45, 115, 105, 110, 99, 101, 105, 102, 45, 110, 111, 110,
+ 101, 45, 109, 97, 116, 99, 104, 105, 102, 45, 114, 97, 110, 103, 101,
+ 105, 102, 45, 117, 110, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115,
+ 105, 110, 99, 101, 108, 97, 115, 116, 45, 109, 111, 100, 105, 102, 105,
+ 101, 100, 108, 105, 110, 107, 108, 111, 99, 97, 116, 105, 111, 110, 109,
+ 97, 120, 45, 102, 111, 114, 119, 97, 114, 100, 115, 112, 114, 111, 120,
+ 121, 45, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97, 116, 101, 112,
+ 114, 111, 120, 121, 45, 97, 117, 116, 104, 111, 114, 105, 122, 97, 116,
+ 105, 111, 110, 114, 97, 110, 103, 101, 114, 101, 102, 101, 114, 101, 114,
+ 114, 101, 102, 114, 101, 115, 104, 114, 101, 116, 114, 121, 45, 97, 102,
+ 116, 101, 114, 115, 101, 114, 118, 101, 114, 115, 101, 116, 45, 99, 111,
+ 111, 107, 105, 101, 115, 116, 114, 105, 99, 116, 45, 116, 114, 97, 110,
+ 115, 112, 111, 114, 116, 45, 115, 101, 99, 117, 114, 105, 116, 121, 116,
+ 114, 97, 110, 115, 102, 101, 114, 45, 101, 110, 99, 111, 100, 105, 110,
+ 103, 118, 97, 114, 121, 118, 105, 97, 119, 119, 119, 45, 97, 117, 116,
+ 104, 101, 110, 116, 105, 99, 97, 116, 101, 105, 100, 101, 110, 116, 105,
+ 116, 121, 44, 100, 101, 102, 108, 97, 116, 101, 105, 100, 101, 110, 116,
+ 105, 116, 121, 44, 103, 122, 105, 112, 100, 101, 102, 108, 97, 116, 101,
+ 44, 103, 122, 105, 112, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100,
+ 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112};
+
+static void static_ref(void *unused) {}
+static void static_unref(grpc_exec_ctx *exec_ctx, void *unused) {}
+static const grpc_slice_refcount_vtable static_sub_vtable = {
+ static_ref, static_unref, grpc_slice_default_eq_impl,
+ grpc_slice_default_hash_impl};
+const grpc_slice_refcount_vtable grpc_static_metadata_vtable = {
+ static_ref, static_unref, grpc_static_slice_eq, grpc_static_slice_hash};
+static grpc_slice_refcount static_sub_refcnt = {&static_sub_vtable,
+ &static_sub_refcnt};
+grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+};
+
+const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
+ {.refcount = &grpc_static_metadata_refcounts[0],
+ .data.refcounted = {g_bytes + 0, 5}},
+ {.refcount = &grpc_static_metadata_refcounts[1],
+ .data.refcounted = {g_bytes + 5, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[2],
+ .data.refcounted = {g_bytes + 12, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[3],
+ .data.refcounted = {g_bytes + 19, 10}},
+ {.refcount = &grpc_static_metadata_refcounts[4],
+ .data.refcounted = {g_bytes + 29, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[5],
+ .data.refcounted = {g_bytes + 36, 2}},
+ {.refcount = &grpc_static_metadata_refcounts[6],
+ .data.refcounted = {g_bytes + 38, 12}},
+ {.refcount = &grpc_static_metadata_refcounts[7],
+ .data.refcounted = {g_bytes + 50, 11}},
+ {.refcount = &grpc_static_metadata_refcounts[8],
+ .data.refcounted = {g_bytes + 61, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[9],
+ .data.refcounted = {g_bytes + 77, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[10],
+ .data.refcounted = {g_bytes + 90, 20}},
+ {.refcount = &grpc_static_metadata_refcounts[11],
+ .data.refcounted = {g_bytes + 110, 12}},
+ {.refcount = &grpc_static_metadata_refcounts[12],
+ .data.refcounted = {g_bytes + 122, 30}},
+ {.refcount = &grpc_static_metadata_refcounts[13],
+ .data.refcounted = {g_bytes + 152, 10}},
+ {.refcount = &grpc_static_metadata_refcounts[14],
+ .data.refcounted = {g_bytes + 162, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[15],
+ .data.refcounted = {g_bytes + 166, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[16],
+ .data.refcounted = {g_bytes + 174, 11}},
+ {.refcount = &grpc_static_metadata_refcounts[17],
+ .data.refcounted = {g_bytes + 185, 12}},
+ {.refcount = &grpc_static_metadata_refcounts[18],
+ .data.refcounted = {g_bytes + 197, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 213, 14}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}},
+ {.refcount = &grpc_static_metadata_refcounts[21],
+ .data.refcounted = {g_bytes + 227, 19}},
+ {.refcount = &grpc_static_metadata_refcounts[22],
+ .data.refcounted = {g_bytes + 246, 12}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 258, 30}},
+ {.refcount = &grpc_static_metadata_refcounts[24],
+ .data.refcounted = {g_bytes + 288, 31}},
+ {.refcount = &grpc_static_metadata_refcounts[25],
+ .data.refcounted = {g_bytes + 319, 36}},
+ {.refcount = &grpc_static_metadata_refcounts[26],
+ .data.refcounted = {g_bytes + 355, 1}},
+ {.refcount = &grpc_static_metadata_refcounts[27],
+ .data.refcounted = {g_bytes + 356, 1}},
+ {.refcount = &grpc_static_metadata_refcounts[28],
+ .data.refcounted = {g_bytes + 357, 1}},
+ {.refcount = &grpc_static_metadata_refcounts[29],
+ .data.refcounted = {g_bytes + 358, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[30],
+ .data.refcounted = {g_bytes + 366, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[31],
+ .data.refcounted = {g_bytes + 370, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[32],
+ .data.refcounted = {g_bytes + 377, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[33],
+ .data.refcounted = {g_bytes + 385, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[34],
+ .data.refcounted = {g_bytes + 401, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[35],
+ .data.refcounted = {g_bytes + 405, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[36],
+ .data.refcounted = {g_bytes + 408, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[37],
+ .data.refcounted = {g_bytes + 411, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[38],
+ .data.refcounted = {g_bytes + 415, 5}},
+ {.refcount = &grpc_static_metadata_refcounts[39],
+ .data.refcounted = {g_bytes + 420, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[40],
+ .data.refcounted = {g_bytes + 424, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[41],
+ .data.refcounted = {g_bytes + 427, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[42],
+ .data.refcounted = {g_bytes + 430, 1}},
+ {.refcount = &grpc_static_metadata_refcounts[43],
+ .data.refcounted = {g_bytes + 431, 11}},
+ {.refcount = &grpc_static_metadata_refcounts[44],
+ .data.refcounted = {g_bytes + 442, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[45],
+ .data.refcounted = {g_bytes + 445, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[46],
+ .data.refcounted = {g_bytes + 448, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[47],
+ .data.refcounted = {g_bytes + 451, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[48],
+ .data.refcounted = {g_bytes + 454, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[49],
+ .data.refcounted = {g_bytes + 457, 14}},
+ {.refcount = &grpc_static_metadata_refcounts[50],
+ .data.refcounted = {g_bytes + 471, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[51],
+ .data.refcounted = {g_bytes + 486, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[52],
+ .data.refcounted = {g_bytes + 499, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[53],
+ .data.refcounted = {g_bytes + 514, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[54],
+ .data.refcounted = {g_bytes + 527, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[55],
+ .data.refcounted = {g_bytes + 533, 27}},
+ {.refcount = &grpc_static_metadata_refcounts[56],
+ .data.refcounted = {g_bytes + 560, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[57],
+ .data.refcounted = {g_bytes + 563, 5}},
+ {.refcount = &grpc_static_metadata_refcounts[58],
+ .data.refcounted = {g_bytes + 568, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[59],
+ .data.refcounted = {g_bytes + 581, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[60],
+ .data.refcounted = {g_bytes + 594, 19}},
+ {.refcount = &grpc_static_metadata_refcounts[61],
+ .data.refcounted = {g_bytes + 613, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[62],
+ .data.refcounted = {g_bytes + 629, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[63],
+ .data.refcounted = {g_bytes + 645, 14}},
+ {.refcount = &grpc_static_metadata_refcounts[64],
+ .data.refcounted = {g_bytes + 659, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[65],
+ .data.refcounted = {g_bytes + 675, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[66],
+ .data.refcounted = {g_bytes + 688, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[67],
+ .data.refcounted = {g_bytes + 694, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[68],
+ .data.refcounted = {g_bytes + 698, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[69],
+ .data.refcounted = {g_bytes + 702, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[70],
+ .data.refcounted = {g_bytes + 708, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[71],
+ .data.refcounted = {g_bytes + 715, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[72],
+ .data.refcounted = {g_bytes + 719, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[73],
+ .data.refcounted = {g_bytes + 727, 17}},
+ {.refcount = &grpc_static_metadata_refcounts[74],
+ .data.refcounted = {g_bytes + 744, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[75],
+ .data.refcounted = {g_bytes + 757, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[76],
+ .data.refcounted = {g_bytes + 765, 19}},
+ {.refcount = &grpc_static_metadata_refcounts[77],
+ .data.refcounted = {g_bytes + 784, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[78],
+ .data.refcounted = {g_bytes + 797, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[79],
+ .data.refcounted = {g_bytes + 801, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[80],
+ .data.refcounted = {g_bytes + 809, 12}},
+ {.refcount = &grpc_static_metadata_refcounts[81],
+ .data.refcounted = {g_bytes + 821, 18}},
+ {.refcount = &grpc_static_metadata_refcounts[82],
+ .data.refcounted = {g_bytes + 839, 19}},
+ {.refcount = &grpc_static_metadata_refcounts[83],
+ .data.refcounted = {g_bytes + 858, 5}},
+ {.refcount = &grpc_static_metadata_refcounts[84],
+ .data.refcounted = {g_bytes + 863, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[85],
+ .data.refcounted = {g_bytes + 870, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[86],
+ .data.refcounted = {g_bytes + 877, 11}},
+ {.refcount = &grpc_static_metadata_refcounts[87],
+ .data.refcounted = {g_bytes + 888, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[88],
+ .data.refcounted = {g_bytes + 894, 10}},
+ {.refcount = &grpc_static_metadata_refcounts[89],
+ .data.refcounted = {g_bytes + 904, 25}},
+ {.refcount = &grpc_static_metadata_refcounts[90],
+ .data.refcounted = {g_bytes + 929, 17}},
+ {.refcount = &grpc_static_metadata_refcounts[91],
+ .data.refcounted = {g_bytes + 946, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[92],
+ .data.refcounted = {g_bytes + 950, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[93],
+ .data.refcounted = {g_bytes + 953, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[94],
+ .data.refcounted = {g_bytes + 969, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[95],
+ .data.refcounted = {g_bytes + 985, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[96],
+ .data.refcounted = {g_bytes + 998, 12}},
+ {.refcount = &grpc_static_metadata_refcounts[97],
+ .data.refcounted = {g_bytes + 1010, 21}},
+};
-grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 4, 8, 6, 2, 4, 8, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8};
+
+static const int8_t elems_r[] = {
+ 10, 8, -3, 0, 9, 21, -76, 22, 0, 10, -7, 20, 0, 19, 18, 17,
+ 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, -49, -50, 16, -52, -53, -54, -54, -55, -56, -57, 0, 38, 37, 36, 35,
+ 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19,
+ 18, 17, 16, 15, 14, 13, 12, 15, 14, 13, 12, 11, 10, 9, 8, 0};
+static uint32_t elems_phash(uint32_t i) {
+ i -= 42;
+ uint32_t x = i % 96;
+ uint32_t y = i / 96;
+ uint32_t h = x;
+ if (y < GPR_ARRAY_SIZE(elems_r)) {
+ uint32_t delta = (uint32_t)elems_r[y];
+ h += delta;
+ }
+ return h;
+}
-const uint8_t grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT * 2] =
- {11, 33, 10, 33, 12, 33, 12, 50, 13, 33, 14, 33, 15, 33, 16, 33, 17, 33,
- 19, 33, 20, 33, 21, 33, 22, 33, 23, 33, 24, 33, 25, 33, 26, 33, 27, 33,
- 28, 18, 28, 33, 29, 33, 30, 33, 34, 33, 35, 33, 36, 33, 37, 33, 40, 31,
- 40, 32, 40, 49, 40, 54, 40, 55, 40, 56, 40, 57, 41, 31, 41, 49, 41, 54,
- 46, 0, 46, 1, 46, 2, 51, 33, 58, 33, 59, 33, 60, 33, 61, 33, 62, 33,
- 63, 33, 64, 33, 65, 33, 66, 33, 67, 33, 68, 33, 69, 38, 69, 71, 69, 74,
- 70, 82, 70, 83, 72, 33, 73, 33, 75, 33, 76, 33, 77, 33, 78, 33, 79, 39,
- 79, 52, 79, 53, 80, 33, 81, 33, 84, 3, 84, 4, 84, 5, 84, 6, 84, 7,
- 84, 8, 84, 9, 85, 33, 86, 87, 88, 33, 89, 33, 90, 33, 91, 33, 92, 33};
+static const uint16_t elem_keys[] = {
+ 1009, 1010, 1011, 240, 241, 242, 243, 244, 138, 139, 42, 43,
+ 429, 430, 431, 911, 912, 913, 712, 713, 1098, 522, 714, 1294,
+ 1392, 1490, 1588, 4822, 4920, 4951, 5116, 5214, 5312, 1111, 5410, 5508,
+ 5606, 5704, 5802, 5900, 5998, 6096, 6194, 6292, 6390, 6488, 6586, 6684,
+ 6782, 6880, 6978, 7076, 7174, 7272, 7370, 7468, 7566, 7664, 7762, 7860,
+ 7958, 8056, 8154, 8252, 8350, 1074, 1075, 1076, 1077, 8448, 8546, 8644,
+ 8742, 8840, 8938, 9036, 9134, 314, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 132, 231, 232, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0};
+static const uint8_t elem_idxs[] = {
+ 74, 77, 75, 19, 20, 21, 22, 23, 15, 16, 17, 18, 11, 12, 13,
+ 3, 4, 5, 0, 1, 41, 6, 2, 70, 48, 55, 56, 24, 25, 26,
+ 27, 28, 29, 7, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 42, 43, 44, 45, 46, 47, 49, 50, 51, 52, 53, 54, 57, 58, 59,
+ 60, 61, 62, 63, 64, 76, 78, 79, 80, 65, 66, 67, 68, 69, 71,
+ 72, 73, 14, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 8, 9, 10};
-const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {
- "0",
- "1",
- "2",
- "200",
- "204",
- "206",
- "304",
- "400",
- "404",
- "500",
- "accept",
- "accept-charset",
- "accept-encoding",
- "accept-language",
- "accept-ranges",
- "access-control-allow-origin",
- "age",
- "allow",
- "application/grpc",
- ":authority",
- "authorization",
- "cache-control",
- "content-disposition",
- "content-encoding",
- "content-language",
- "content-length",
- "content-location",
- "content-range",
- "content-type",
- "cookie",
- "date",
- "deflate",
- "deflate,gzip",
- "",
- "etag",
- "expect",
- "expires",
- "from",
- "GET",
- "grpc",
- "grpc-accept-encoding",
- "grpc-encoding",
- "grpc-internal-encoding-request",
- "grpc-message",
- "grpc-payload-bin",
- "grpc-stats-bin",
- "grpc-status",
- "grpc-timeout",
- "grpc-tracing-bin",
- "gzip",
- "gzip, deflate",
- "host",
- "http",
- "https",
- "identity",
- "identity,deflate",
- "identity,deflate,gzip",
- "identity,gzip",
- "if-match",
- "if-modified-since",
- "if-none-match",
- "if-range",
- "if-unmodified-since",
- "last-modified",
- "lb-cost-bin",
- "lb-token",
- "link",
- "location",
- "max-forwards",
- ":method",
- ":path",
- "POST",
- "proxy-authenticate",
- "proxy-authorization",
- "PUT",
- "range",
- "referer",
- "refresh",
- "retry-after",
- ":scheme",
- "server",
- "set-cookie",
- "/",
- "/index.html",
- ":status",
- "strict-transport-security",
- "te",
- "trailers",
- "transfer-encoding",
- "user-agent",
- "vary",
- "via",
- "www-authenticate"};
+grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
+ if (a == -1 || b == -1) return GRPC_MDNULL;
+ uint32_t k = (uint32_t)(a * 98 + b);
+ uint32_t h = elems_phash(k);
+ return elem_keys[h] == k
+ ? GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[elem_idxs[h]],
+ GRPC_MDELEM_STORAGE_STATIC)
+ : GRPC_MDNULL;
+}
-const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 29, 26, 30,
- 28, 32, 27, 31};
+grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {
+ {{.refcount = &grpc_static_metadata_refcounts[7],
+ .data.refcounted = {g_bytes + 50, 11}},
+ {.refcount = &grpc_static_metadata_refcounts[26],
+ .data.refcounted = {g_bytes + 355, 1}}},
+ {{.refcount = &grpc_static_metadata_refcounts[7],
+ .data.refcounted = {g_bytes + 50, 11}},
+ {.refcount = &grpc_static_metadata_refcounts[27],
+ .data.refcounted = {g_bytes + 356, 1}}},
+ {{.refcount = &grpc_static_metadata_refcounts[7],
+ .data.refcounted = {g_bytes + 50, 11}},
+ {.refcount = &grpc_static_metadata_refcounts[28],
+ .data.refcounted = {g_bytes + 357, 1}}},
+ {{.refcount = &grpc_static_metadata_refcounts[9],
+ .data.refcounted = {g_bytes + 77, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[29],
+ .data.refcounted = {g_bytes + 358, 8}}},
+ {{.refcount = &grpc_static_metadata_refcounts[9],
+ .data.refcounted = {g_bytes + 77, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[30],
+ .data.refcounted = {g_bytes + 366, 4}}},
+ {{.refcount = &grpc_static_metadata_refcounts[9],
+ .data.refcounted = {g_bytes + 77, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[31],
+ .data.refcounted = {g_bytes + 370, 7}}},
+ {{.refcount = &grpc_static_metadata_refcounts[5],
+ .data.refcounted = {g_bytes + 36, 2}},
+ {.refcount = &grpc_static_metadata_refcounts[32],
+ .data.refcounted = {g_bytes + 377, 8}}},
+ {{.refcount = &grpc_static_metadata_refcounts[11],
+ .data.refcounted = {g_bytes + 110, 12}},
+ {.refcount = &grpc_static_metadata_refcounts[33],
+ .data.refcounted = {g_bytes + 385, 16}}},
+ {{.refcount = &grpc_static_metadata_refcounts[1],
+ .data.refcounted = {g_bytes + 5, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[34],
+ .data.refcounted = {g_bytes + 401, 4}}},
+ {{.refcount = &grpc_static_metadata_refcounts[2],
+ .data.refcounted = {g_bytes + 12, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[35],
+ .data.refcounted = {g_bytes + 405, 3}}},
+ {{.refcount = &grpc_static_metadata_refcounts[2],
+ .data.refcounted = {g_bytes + 12, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[36],
+ .data.refcounted = {g_bytes + 408, 3}}},
+ {{.refcount = &grpc_static_metadata_refcounts[4],
+ .data.refcounted = {g_bytes + 29, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[37],
+ .data.refcounted = {g_bytes + 411, 4}}},
+ {{.refcount = &grpc_static_metadata_refcounts[4],
+ .data.refcounted = {g_bytes + 29, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[38],
+ .data.refcounted = {g_bytes + 415, 5}}},
+ {{.refcount = &grpc_static_metadata_refcounts[4],
+ .data.refcounted = {g_bytes + 29, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[39],
+ .data.refcounted = {g_bytes + 420, 4}}},
+ {{.refcount = &grpc_static_metadata_refcounts[3],
+ .data.refcounted = {g_bytes + 19, 10}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[1],
+ .data.refcounted = {g_bytes + 5, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[40],
+ .data.refcounted = {g_bytes + 424, 3}}},
+ {{.refcount = &grpc_static_metadata_refcounts[1],
+ .data.refcounted = {g_bytes + 5, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[41],
+ .data.refcounted = {g_bytes + 427, 3}}},
+ {{.refcount = &grpc_static_metadata_refcounts[0],
+ .data.refcounted = {g_bytes + 0, 5}},
+ {.refcount = &grpc_static_metadata_refcounts[42],
+ .data.refcounted = {g_bytes + 430, 1}}},
+ {{.refcount = &grpc_static_metadata_refcounts[0],
+ .data.refcounted = {g_bytes + 0, 5}},
+ {.refcount = &grpc_static_metadata_refcounts[43],
+ .data.refcounted = {g_bytes + 431, 11}}},
+ {{.refcount = &grpc_static_metadata_refcounts[2],
+ .data.refcounted = {g_bytes + 12, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[44],
+ .data.refcounted = {g_bytes + 442, 3}}},
+ {{.refcount = &grpc_static_metadata_refcounts[2],
+ .data.refcounted = {g_bytes + 12, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[45],
+ .data.refcounted = {g_bytes + 445, 3}}},
+ {{.refcount = &grpc_static_metadata_refcounts[2],
+ .data.refcounted = {g_bytes + 12, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[46],
+ .data.refcounted = {g_bytes + 448, 3}}},
+ {{.refcount = &grpc_static_metadata_refcounts[2],
+ .data.refcounted = {g_bytes + 12, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[47],
+ .data.refcounted = {g_bytes + 451, 3}}},
+ {{.refcount = &grpc_static_metadata_refcounts[2],
+ .data.refcounted = {g_bytes + 12, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[48],
+ .data.refcounted = {g_bytes + 454, 3}}},
+ {{.refcount = &grpc_static_metadata_refcounts[49],
+ .data.refcounted = {g_bytes + 457, 14}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[50],
+ .data.refcounted = {g_bytes + 471, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[50],
+ .data.refcounted = {g_bytes + 471, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[51],
+ .data.refcounted = {g_bytes + 486, 13}}},
+ {{.refcount = &grpc_static_metadata_refcounts[52],
+ .data.refcounted = {g_bytes + 499, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[53],
+ .data.refcounted = {g_bytes + 514, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[54],
+ .data.refcounted = {g_bytes + 527, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[55],
+ .data.refcounted = {g_bytes + 533, 27}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[56],
+ .data.refcounted = {g_bytes + 560, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[57],
+ .data.refcounted = {g_bytes + 563, 5}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[58],
+ .data.refcounted = {g_bytes + 568, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[59],
+ .data.refcounted = {g_bytes + 581, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[60],
+ .data.refcounted = {g_bytes + 594, 19}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[61],
+ .data.refcounted = {g_bytes + 613, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[62],
+ .data.refcounted = {g_bytes + 629, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[63],
+ .data.refcounted = {g_bytes + 645, 14}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[64],
+ .data.refcounted = {g_bytes + 659, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[65],
+ .data.refcounted = {g_bytes + 675, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[11],
+ .data.refcounted = {g_bytes + 110, 12}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[66],
+ .data.refcounted = {g_bytes + 688, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[67],
+ .data.refcounted = {g_bytes + 694, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[68],
+ .data.refcounted = {g_bytes + 698, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[69],
+ .data.refcounted = {g_bytes + 702, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[70],
+ .data.refcounted = {g_bytes + 708, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[71],
+ .data.refcounted = {g_bytes + 715, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[14],
+ .data.refcounted = {g_bytes + 162, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[72],
+ .data.refcounted = {g_bytes + 719, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[73],
+ .data.refcounted = {g_bytes + 727, 17}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[74],
+ .data.refcounted = {g_bytes + 744, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[75],
+ .data.refcounted = {g_bytes + 757, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[76],
+ .data.refcounted = {g_bytes + 765, 19}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[77],
+ .data.refcounted = {g_bytes + 784, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[15],
+ .data.refcounted = {g_bytes + 166, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[16],
+ .data.refcounted = {g_bytes + 174, 11}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[78],
+ .data.refcounted = {g_bytes + 797, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[79],
+ .data.refcounted = {g_bytes + 801, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[80],
+ .data.refcounted = {g_bytes + 809, 12}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[81],
+ .data.refcounted = {g_bytes + 821, 18}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[82],
+ .data.refcounted = {g_bytes + 839, 19}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[83],
+ .data.refcounted = {g_bytes + 858, 5}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[84],
+ .data.refcounted = {g_bytes + 863, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[85],
+ .data.refcounted = {g_bytes + 870, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[86],
+ .data.refcounted = {g_bytes + 877, 11}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[87],
+ .data.refcounted = {g_bytes + 888, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[88],
+ .data.refcounted = {g_bytes + 894, 10}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[89],
+ .data.refcounted = {g_bytes + 904, 25}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[90],
+ .data.refcounted = {g_bytes + 929, 17}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[13],
+ .data.refcounted = {g_bytes + 152, 10}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[91],
+ .data.refcounted = {g_bytes + 946, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[92],
+ .data.refcounted = {g_bytes + 950, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[93],
+ .data.refcounted = {g_bytes + 953, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 227, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[10],
+ .data.refcounted = {g_bytes + 90, 20}},
+ {.refcount = &grpc_static_metadata_refcounts[29],
+ .data.refcounted = {g_bytes + 358, 8}}},
+ {{.refcount = &grpc_static_metadata_refcounts[10],
+ .data.refcounted = {g_bytes + 90, 20}},
+ {.refcount = &grpc_static_metadata_refcounts[31],
+ .data.refcounted = {g_bytes + 370, 7}}},
+ {{.refcount = &grpc_static_metadata_refcounts[10],
+ .data.refcounted = {g_bytes + 90, 20}},
+ {.refcount = &grpc_static_metadata_refcounts[94],
+ .data.refcounted = {g_bytes + 969, 16}}},
+ {{.refcount = &grpc_static_metadata_refcounts[10],
+ .data.refcounted = {g_bytes + 90, 20}},
+ {.refcount = &grpc_static_metadata_refcounts[30],
+ .data.refcounted = {g_bytes + 366, 4}}},
+ {{.refcount = &grpc_static_metadata_refcounts[10],
+ .data.refcounted = {g_bytes + 90, 20}},
+ {.refcount = &grpc_static_metadata_refcounts[95],
+ .data.refcounted = {g_bytes + 985, 13}}},
+ {{.refcount = &grpc_static_metadata_refcounts[10],
+ .data.refcounted = {g_bytes + 90, 20}},
+ {.refcount = &grpc_static_metadata_refcounts[96],
+ .data.refcounted = {g_bytes + 998, 12}}},
+ {{.refcount = &grpc_static_metadata_refcounts[10],
+ .data.refcounted = {g_bytes + 90, 20}},
+ {.refcount = &grpc_static_metadata_refcounts[97],
+ .data.refcounted = {g_bytes + 1010, 21}}},
+};
+const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 74, 75, 76,
+ 77, 78, 79, 80};
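[Editorial note] The generated code above resolves a static metadata element from a (key-slice, value-slice) index pair via a displacement-style hash: elems_phash() adds a per-row offset from elems_r[] so every valid key lands in a unique slot of elem_keys[]/elem_idxs[], and a single equality check confirms the hit. The toy below mirrors that lookup structure with hand-made data (it is an illustration only, not the generated gRPC tables):

#include <stdint.h>
#include <stdio.h>

#define TOY_SLOTS 6
static const int8_t toy_r[] = {0, 0, 0, 0, 1, 2};            /* displacements */
static const uint16_t toy_keys[TOY_SLOTS] = {0, 5, 17, 0, 0, 23};
static const uint8_t toy_idxs[TOY_SLOTS] = {0, 0, 1, 0, 0, 2};

static uint32_t toy_phash(uint32_t k) {
  uint32_t x = k % 4, y = k / 4;
  uint32_t h = x;
  if (y < sizeof(toy_r) / sizeof(toy_r[0])) h += (uint32_t)toy_r[y];
  return h;
}

/* Returns the element index for key k, or -1 if k is not in the table
 * (0 is the "empty slot" filler here, never a real key). */
static int toy_lookup(uint32_t k) {
  uint32_t h = toy_phash(k);
  return (k != 0 && h < TOY_SLOTS && toy_keys[h] == k) ? (int)toy_idxs[h] : -1;
}

int main(void) {
  printf("%d %d %d %d\n", toy_lookup(5), toy_lookup(17), toy_lookup(23),
         toy_lookup(9)); /* prints: 0 1 2 -1 */
  return 0;
}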
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index 28ad6f2961..7649ccd5d2 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -44,375 +44,521 @@
#include "src/core/lib/transport/metadata.h"
-#define GRPC_STATIC_MDSTR_COUNT 93
-extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
+#define GRPC_STATIC_MDSTR_COUNT 98
+extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
+/* ":path" */
+#define GRPC_MDSTR_PATH (grpc_static_slice_table[0])
+/* ":method" */
+#define GRPC_MDSTR_METHOD (grpc_static_slice_table[1])
+/* ":status" */
+#define GRPC_MDSTR_STATUS (grpc_static_slice_table[2])
+/* ":authority" */
+#define GRPC_MDSTR_AUTHORITY (grpc_static_slice_table[3])
+/* ":scheme" */
+#define GRPC_MDSTR_SCHEME (grpc_static_slice_table[4])
+/* "te" */
+#define GRPC_MDSTR_TE (grpc_static_slice_table[5])
+/* "grpc-message" */
+#define GRPC_MDSTR_GRPC_MESSAGE (grpc_static_slice_table[6])
+/* "grpc-status" */
+#define GRPC_MDSTR_GRPC_STATUS (grpc_static_slice_table[7])
+/* "grpc-payload-bin" */
+#define GRPC_MDSTR_GRPC_PAYLOAD_BIN (grpc_static_slice_table[8])
+/* "grpc-encoding" */
+#define GRPC_MDSTR_GRPC_ENCODING (grpc_static_slice_table[9])
+/* "grpc-accept-encoding" */
+#define GRPC_MDSTR_GRPC_ACCEPT_ENCODING (grpc_static_slice_table[10])
+/* "content-type" */
+#define GRPC_MDSTR_CONTENT_TYPE (grpc_static_slice_table[11])
+/* "grpc-internal-encoding-request" */
+#define GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST (grpc_static_slice_table[12])
+/* "user-agent" */
+#define GRPC_MDSTR_USER_AGENT (grpc_static_slice_table[13])
+/* "host" */
+#define GRPC_MDSTR_HOST (grpc_static_slice_table[14])
+/* "lb-token" */
+#define GRPC_MDSTR_LB_TOKEN (grpc_static_slice_table[15])
+/* "lb-cost-bin" */
+#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[16])
+/* "grpc-timeout" */
+#define GRPC_MDSTR_GRPC_TIMEOUT (grpc_static_slice_table[17])
+/* "grpc-tracing-bin" */
+#define GRPC_MDSTR_GRPC_TRACING_BIN (grpc_static_slice_table[18])
+/* "grpc-stats-bin" */
+#define GRPC_MDSTR_GRPC_STATS_BIN (grpc_static_slice_table[19])
+/* "" */
+#define GRPC_MDSTR_EMPTY (grpc_static_slice_table[20])
+/* "grpc.wait_for_ready" */
+#define GRPC_MDSTR_GRPC_DOT_WAIT_FOR_READY (grpc_static_slice_table[21])
+/* "grpc.timeout" */
+#define GRPC_MDSTR_GRPC_DOT_TIMEOUT (grpc_static_slice_table[22])
+/* "grpc.max_request_message_bytes" */
+#define GRPC_MDSTR_GRPC_DOT_MAX_REQUEST_MESSAGE_BYTES \
+ (grpc_static_slice_table[23])
+/* "grpc.max_response_message_bytes" */
+#define GRPC_MDSTR_GRPC_DOT_MAX_RESPONSE_MESSAGE_BYTES \
+ (grpc_static_slice_table[24])
+/* "/grpc.lb.v1.LoadBalancer/BalanceLoad" */
+#define GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD \
+ (grpc_static_slice_table[25])
/* "0" */
-#define GRPC_MDSTR_0 (&grpc_static_mdstr_table[0])
+#define GRPC_MDSTR_0 (grpc_static_slice_table[26])
/* "1" */
-#define GRPC_MDSTR_1 (&grpc_static_mdstr_table[1])
+#define GRPC_MDSTR_1 (grpc_static_slice_table[27])
/* "2" */
-#define GRPC_MDSTR_2 (&grpc_static_mdstr_table[2])
+#define GRPC_MDSTR_2 (grpc_static_slice_table[28])
+/* "identity" */
+#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[29])
+/* "gzip" */
+#define GRPC_MDSTR_GZIP (grpc_static_slice_table[30])
+/* "deflate" */
+#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[31])
+/* "trailers" */
+#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[32])
+/* "application/grpc" */
+#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[33])
+/* "POST" */
+#define GRPC_MDSTR_POST (grpc_static_slice_table[34])
/* "200" */
-#define GRPC_MDSTR_200 (&grpc_static_mdstr_table[3])
+#define GRPC_MDSTR_200 (grpc_static_slice_table[35])
+/* "404" */
+#define GRPC_MDSTR_404 (grpc_static_slice_table[36])
+/* "http" */
+#define GRPC_MDSTR_HTTP (grpc_static_slice_table[37])
+/* "https" */
+#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[38])
+/* "grpc" */
+#define GRPC_MDSTR_GRPC (grpc_static_slice_table[39])
+/* "GET" */
+#define GRPC_MDSTR_GET (grpc_static_slice_table[40])
+/* "PUT" */
+#define GRPC_MDSTR_PUT (grpc_static_slice_table[41])
+/* "/" */
+#define GRPC_MDSTR_SLASH (grpc_static_slice_table[42])
+/* "/index.html" */
+#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[43])
/* "204" */
-#define GRPC_MDSTR_204 (&grpc_static_mdstr_table[4])
+#define GRPC_MDSTR_204 (grpc_static_slice_table[44])
/* "206" */
-#define GRPC_MDSTR_206 (&grpc_static_mdstr_table[5])
+#define GRPC_MDSTR_206 (grpc_static_slice_table[45])
/* "304" */
-#define GRPC_MDSTR_304 (&grpc_static_mdstr_table[6])
+#define GRPC_MDSTR_304 (grpc_static_slice_table[46])
/* "400" */
-#define GRPC_MDSTR_400 (&grpc_static_mdstr_table[7])
-/* "404" */
-#define GRPC_MDSTR_404 (&grpc_static_mdstr_table[8])
+#define GRPC_MDSTR_400 (grpc_static_slice_table[47])
/* "500" */
-#define GRPC_MDSTR_500 (&grpc_static_mdstr_table[9])
-/* "accept" */
-#define GRPC_MDSTR_ACCEPT (&grpc_static_mdstr_table[10])
+#define GRPC_MDSTR_500 (grpc_static_slice_table[48])
/* "accept-charset" */
-#define GRPC_MDSTR_ACCEPT_CHARSET (&grpc_static_mdstr_table[11])
+#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[49])
/* "accept-encoding" */
-#define GRPC_MDSTR_ACCEPT_ENCODING (&grpc_static_mdstr_table[12])
+#define GRPC_MDSTR_ACCEPT_ENCODING (grpc_static_slice_table[50])
+/* "gzip, deflate" */
+#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[51])
/* "accept-language" */
-#define GRPC_MDSTR_ACCEPT_LANGUAGE (&grpc_static_mdstr_table[13])
+#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[52])
/* "accept-ranges" */
-#define GRPC_MDSTR_ACCEPT_RANGES (&grpc_static_mdstr_table[14])
+#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[53])
+/* "accept" */
+#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[54])
/* "access-control-allow-origin" */
-#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (&grpc_static_mdstr_table[15])
+#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[55])
/* "age" */
-#define GRPC_MDSTR_AGE (&grpc_static_mdstr_table[16])
+#define GRPC_MDSTR_AGE (grpc_static_slice_table[56])
/* "allow" */
-#define GRPC_MDSTR_ALLOW (&grpc_static_mdstr_table[17])
-/* "application/grpc" */
-#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (&grpc_static_mdstr_table[18])
-/* ":authority" */
-#define GRPC_MDSTR_AUTHORITY (&grpc_static_mdstr_table[19])
+#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[57])
/* "authorization" */
-#define GRPC_MDSTR_AUTHORIZATION (&grpc_static_mdstr_table[20])
+#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[58])
/* "cache-control" */
-#define GRPC_MDSTR_CACHE_CONTROL (&grpc_static_mdstr_table[21])
+#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[59])
/* "content-disposition" */
-#define GRPC_MDSTR_CONTENT_DISPOSITION (&grpc_static_mdstr_table[22])
+#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[60])
/* "content-encoding" */
-#define GRPC_MDSTR_CONTENT_ENCODING (&grpc_static_mdstr_table[23])
+#define GRPC_MDSTR_CONTENT_ENCODING (grpc_static_slice_table[61])
/* "content-language" */
-#define GRPC_MDSTR_CONTENT_LANGUAGE (&grpc_static_mdstr_table[24])
+#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[62])
/* "content-length" */
-#define GRPC_MDSTR_CONTENT_LENGTH (&grpc_static_mdstr_table[25])
+#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[63])
/* "content-location" */
-#define GRPC_MDSTR_CONTENT_LOCATION (&grpc_static_mdstr_table[26])
+#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[64])
/* "content-range" */
-#define GRPC_MDSTR_CONTENT_RANGE (&grpc_static_mdstr_table[27])
-/* "content-type" */
-#define GRPC_MDSTR_CONTENT_TYPE (&grpc_static_mdstr_table[28])
+#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[65])
/* "cookie" */
-#define GRPC_MDSTR_COOKIE (&grpc_static_mdstr_table[29])
+#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[66])
/* "date" */
-#define GRPC_MDSTR_DATE (&grpc_static_mdstr_table[30])
-/* "deflate" */
-#define GRPC_MDSTR_DEFLATE (&grpc_static_mdstr_table[31])
-/* "deflate,gzip" */
-#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (&grpc_static_mdstr_table[32])
-/* "" */
-#define GRPC_MDSTR_EMPTY (&grpc_static_mdstr_table[33])
+#define GRPC_MDSTR_DATE (grpc_static_slice_table[67])
/* "etag" */
-#define GRPC_MDSTR_ETAG (&grpc_static_mdstr_table[34])
+#define GRPC_MDSTR_ETAG (grpc_static_slice_table[68])
/* "expect" */
-#define GRPC_MDSTR_EXPECT (&grpc_static_mdstr_table[35])
+#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[69])
/* "expires" */
-#define GRPC_MDSTR_EXPIRES (&grpc_static_mdstr_table[36])
+#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[70])
/* "from" */
-#define GRPC_MDSTR_FROM (&grpc_static_mdstr_table[37])
-/* "GET" */
-#define GRPC_MDSTR_GET (&grpc_static_mdstr_table[38])
-/* "grpc" */
-#define GRPC_MDSTR_GRPC (&grpc_static_mdstr_table[39])
-/* "grpc-accept-encoding" */
-#define GRPC_MDSTR_GRPC_ACCEPT_ENCODING (&grpc_static_mdstr_table[40])
-/* "grpc-encoding" */
-#define GRPC_MDSTR_GRPC_ENCODING (&grpc_static_mdstr_table[41])
-/* "grpc-internal-encoding-request" */
-#define GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST (&grpc_static_mdstr_table[42])
-/* "grpc-message" */
-#define GRPC_MDSTR_GRPC_MESSAGE (&grpc_static_mdstr_table[43])
-/* "grpc-payload-bin" */
-#define GRPC_MDSTR_GRPC_PAYLOAD_BIN (&grpc_static_mdstr_table[44])
-/* "grpc-stats-bin" */
-#define GRPC_MDSTR_GRPC_STATS_BIN (&grpc_static_mdstr_table[45])
-/* "grpc-status" */
-#define GRPC_MDSTR_GRPC_STATUS (&grpc_static_mdstr_table[46])
-/* "grpc-timeout" */
-#define GRPC_MDSTR_GRPC_TIMEOUT (&grpc_static_mdstr_table[47])
-/* "grpc-tracing-bin" */
-#define GRPC_MDSTR_GRPC_TRACING_BIN (&grpc_static_mdstr_table[48])
-/* "gzip" */
-#define GRPC_MDSTR_GZIP (&grpc_static_mdstr_table[49])
-/* "gzip, deflate" */
-#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (&grpc_static_mdstr_table[50])
-/* "host" */
-#define GRPC_MDSTR_HOST (&grpc_static_mdstr_table[51])
-/* "http" */
-#define GRPC_MDSTR_HTTP (&grpc_static_mdstr_table[52])
-/* "https" */
-#define GRPC_MDSTR_HTTPS (&grpc_static_mdstr_table[53])
-/* "identity" */
-#define GRPC_MDSTR_IDENTITY (&grpc_static_mdstr_table[54])
-/* "identity,deflate" */
-#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (&grpc_static_mdstr_table[55])
-/* "identity,deflate,gzip" */
-#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
- (&grpc_static_mdstr_table[56])
-/* "identity,gzip" */
-#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (&grpc_static_mdstr_table[57])
+#define GRPC_MDSTR_FROM (grpc_static_slice_table[71])
/* "if-match" */
-#define GRPC_MDSTR_IF_MATCH (&grpc_static_mdstr_table[58])
+#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[72])
/* "if-modified-since" */
-#define GRPC_MDSTR_IF_MODIFIED_SINCE (&grpc_static_mdstr_table[59])
+#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[73])
/* "if-none-match" */
-#define GRPC_MDSTR_IF_NONE_MATCH (&grpc_static_mdstr_table[60])
+#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[74])
/* "if-range" */
-#define GRPC_MDSTR_IF_RANGE (&grpc_static_mdstr_table[61])
+#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[75])
/* "if-unmodified-since" */
-#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (&grpc_static_mdstr_table[62])
+#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[76])
/* "last-modified" */
-#define GRPC_MDSTR_LAST_MODIFIED (&grpc_static_mdstr_table[63])
-/* "lb-cost-bin" */
-#define GRPC_MDSTR_LB_COST_BIN (&grpc_static_mdstr_table[64])
-/* "lb-token" */
-#define GRPC_MDSTR_LB_TOKEN (&grpc_static_mdstr_table[65])
+#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[77])
/* "link" */
-#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[66])
+#define GRPC_MDSTR_LINK (grpc_static_slice_table[78])
/* "location" */
-#define GRPC_MDSTR_LOCATION (&grpc_static_mdstr_table[67])
+#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[79])
/* "max-forwards" */
-#define GRPC_MDSTR_MAX_FORWARDS (&grpc_static_mdstr_table[68])
-/* ":method" */
-#define GRPC_MDSTR_METHOD (&grpc_static_mdstr_table[69])
-/* ":path" */
-#define GRPC_MDSTR_PATH (&grpc_static_mdstr_table[70])
-/* "POST" */
-#define GRPC_MDSTR_POST (&grpc_static_mdstr_table[71])
+#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[80])
/* "proxy-authenticate" */
-#define GRPC_MDSTR_PROXY_AUTHENTICATE (&grpc_static_mdstr_table[72])
+#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[81])
/* "proxy-authorization" */
-#define GRPC_MDSTR_PROXY_AUTHORIZATION (&grpc_static_mdstr_table[73])
-/* "PUT" */
-#define GRPC_MDSTR_PUT (&grpc_static_mdstr_table[74])
+#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[82])
/* "range" */
-#define GRPC_MDSTR_RANGE (&grpc_static_mdstr_table[75])
+#define GRPC_MDSTR_RANGE (grpc_static_slice_table[83])
/* "referer" */
-#define GRPC_MDSTR_REFERER (&grpc_static_mdstr_table[76])
+#define GRPC_MDSTR_REFERER (grpc_static_slice_table[84])
/* "refresh" */
-#define GRPC_MDSTR_REFRESH (&grpc_static_mdstr_table[77])
+#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[85])
/* "retry-after" */
-#define GRPC_MDSTR_RETRY_AFTER (&grpc_static_mdstr_table[78])
-/* ":scheme" */
-#define GRPC_MDSTR_SCHEME (&grpc_static_mdstr_table[79])
+#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[86])
/* "server" */
-#define GRPC_MDSTR_SERVER (&grpc_static_mdstr_table[80])
+#define GRPC_MDSTR_SERVER (grpc_static_slice_table[87])
/* "set-cookie" */
-#define GRPC_MDSTR_SET_COOKIE (&grpc_static_mdstr_table[81])
-/* "/" */
-#define GRPC_MDSTR_SLASH (&grpc_static_mdstr_table[82])
-/* "/index.html" */
-#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (&grpc_static_mdstr_table[83])
-/* ":status" */
-#define GRPC_MDSTR_STATUS (&grpc_static_mdstr_table[84])
+#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[88])
/* "strict-transport-security" */
-#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (&grpc_static_mdstr_table[85])
-/* "te" */
-#define GRPC_MDSTR_TE (&grpc_static_mdstr_table[86])
-/* "trailers" */
-#define GRPC_MDSTR_TRAILERS (&grpc_static_mdstr_table[87])
+#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[89])
/* "transfer-encoding" */
-#define GRPC_MDSTR_TRANSFER_ENCODING (&grpc_static_mdstr_table[88])
-/* "user-agent" */
-#define GRPC_MDSTR_USER_AGENT (&grpc_static_mdstr_table[89])
+#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[90])
/* "vary" */
-#define GRPC_MDSTR_VARY (&grpc_static_mdstr_table[90])
+#define GRPC_MDSTR_VARY (grpc_static_slice_table[91])
/* "via" */
-#define GRPC_MDSTR_VIA (&grpc_static_mdstr_table[91])
+#define GRPC_MDSTR_VIA (grpc_static_slice_table[92])
/* "www-authenticate" */
-#define GRPC_MDSTR_WWW_AUTHENTICATE (&grpc_static_mdstr_table[92])
+#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[93])
+/* "identity,deflate" */
+#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[94])
+/* "identity,gzip" */
+#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[95])
+/* "deflate,gzip" */
+#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[96])
+/* "identity,deflate,gzip" */
+#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
+ (grpc_static_slice_table[97])
+
+extern const grpc_slice_refcount_vtable grpc_static_metadata_vtable;
+extern grpc_slice_refcount
+ grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT];
+#define GRPC_IS_STATIC_METADATA_STRING(slice) \
+ ((slice).refcount != NULL && \
+ (slice).refcount->vtable == &grpc_static_metadata_vtable)
+
+#define GRPC_STATIC_METADATA_INDEX(static_slice) \
+ ((int)((static_slice).refcount - grpc_static_metadata_refcounts))
#define GRPC_STATIC_MDELEM_COUNT 81
-extern grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
+extern grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
+/* "grpc-status": "0" */
+#define GRPC_MDELEM_GRPC_STATUS_0 \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[0], GRPC_MDELEM_STORAGE_STATIC))
+/* "grpc-status": "1" */
+#define GRPC_MDELEM_GRPC_STATUS_1 \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[1], GRPC_MDELEM_STORAGE_STATIC))
+/* "grpc-status": "2" */
+#define GRPC_MDELEM_GRPC_STATUS_2 \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[2], GRPC_MDELEM_STORAGE_STATIC))
+/* "grpc-encoding": "identity" */
+#define GRPC_MDELEM_GRPC_ENCODING_IDENTITY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[3], GRPC_MDELEM_STORAGE_STATIC))
+/* "grpc-encoding": "gzip" */
+#define GRPC_MDELEM_GRPC_ENCODING_GZIP \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[4], GRPC_MDELEM_STORAGE_STATIC))
+/* "grpc-encoding": "deflate" */
+#define GRPC_MDELEM_GRPC_ENCODING_DEFLATE \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[5], GRPC_MDELEM_STORAGE_STATIC))
+/* "te": "trailers" */
+#define GRPC_MDELEM_TE_TRAILERS \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[6], GRPC_MDELEM_STORAGE_STATIC))
+/* "content-type": "application/grpc" */
+#define GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[7], GRPC_MDELEM_STORAGE_STATIC))
+/* ":method": "POST" */
+#define GRPC_MDELEM_METHOD_POST \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[8], GRPC_MDELEM_STORAGE_STATIC))
+/* ":status": "200" */
+#define GRPC_MDELEM_STATUS_200 \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[9], GRPC_MDELEM_STORAGE_STATIC))
+/* ":status": "404" */
+#define GRPC_MDELEM_STATUS_404 \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[10], GRPC_MDELEM_STORAGE_STATIC))
+/* ":scheme": "http" */
+#define GRPC_MDELEM_SCHEME_HTTP \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[11], GRPC_MDELEM_STORAGE_STATIC))
+/* ":scheme": "https" */
+#define GRPC_MDELEM_SCHEME_HTTPS \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[12], GRPC_MDELEM_STORAGE_STATIC))
+/* ":scheme": "grpc" */
+#define GRPC_MDELEM_SCHEME_GRPC \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[13], GRPC_MDELEM_STORAGE_STATIC))
+/* ":authority": "" */
+#define GRPC_MDELEM_AUTHORITY_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[14], GRPC_MDELEM_STORAGE_STATIC))
+/* ":method": "GET" */
+#define GRPC_MDELEM_METHOD_GET \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[15], GRPC_MDELEM_STORAGE_STATIC))
+/* ":method": "PUT" */
+#define GRPC_MDELEM_METHOD_PUT \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[16], GRPC_MDELEM_STORAGE_STATIC))
+/* ":path": "/" */
+#define GRPC_MDELEM_PATH_SLASH \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[17], GRPC_MDELEM_STORAGE_STATIC))
+/* ":path": "/index.html" */
+#define GRPC_MDELEM_PATH_SLASH_INDEX_DOT_HTML \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[18], GRPC_MDELEM_STORAGE_STATIC))
+/* ":status": "204" */
+#define GRPC_MDELEM_STATUS_204 \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[19], GRPC_MDELEM_STORAGE_STATIC))
+/* ":status": "206" */
+#define GRPC_MDELEM_STATUS_206 \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[20], GRPC_MDELEM_STORAGE_STATIC))
+/* ":status": "304" */
+#define GRPC_MDELEM_STATUS_304 \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[21], GRPC_MDELEM_STORAGE_STATIC))
+/* ":status": "400" */
+#define GRPC_MDELEM_STATUS_400 \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[22], GRPC_MDELEM_STORAGE_STATIC))
+/* ":status": "500" */
+#define GRPC_MDELEM_STATUS_500 \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[23], GRPC_MDELEM_STORAGE_STATIC))
/* "accept-charset": "" */
-#define GRPC_MDELEM_ACCEPT_CHARSET_EMPTY (&grpc_static_mdelem_table[0])
-/* "accept": "" */
-#define GRPC_MDELEM_ACCEPT_EMPTY (&grpc_static_mdelem_table[1])
+#define GRPC_MDELEM_ACCEPT_CHARSET_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[24], GRPC_MDELEM_STORAGE_STATIC))
/* "accept-encoding": "" */
-#define GRPC_MDELEM_ACCEPT_ENCODING_EMPTY (&grpc_static_mdelem_table[2])
+#define GRPC_MDELEM_ACCEPT_ENCODING_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[25], GRPC_MDELEM_STORAGE_STATIC))
/* "accept-encoding": "gzip, deflate" */
#define GRPC_MDELEM_ACCEPT_ENCODING_GZIP_COMMA_DEFLATE \
- (&grpc_static_mdelem_table[3])
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[26], GRPC_MDELEM_STORAGE_STATIC))
/* "accept-language": "" */
-#define GRPC_MDELEM_ACCEPT_LANGUAGE_EMPTY (&grpc_static_mdelem_table[4])
+#define GRPC_MDELEM_ACCEPT_LANGUAGE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[27], GRPC_MDELEM_STORAGE_STATIC))
/* "accept-ranges": "" */
-#define GRPC_MDELEM_ACCEPT_RANGES_EMPTY (&grpc_static_mdelem_table[5])
+#define GRPC_MDELEM_ACCEPT_RANGES_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[28], GRPC_MDELEM_STORAGE_STATIC))
+/* "accept": "" */
+#define GRPC_MDELEM_ACCEPT_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[29], GRPC_MDELEM_STORAGE_STATIC))
/* "access-control-allow-origin": "" */
#define GRPC_MDELEM_ACCESS_CONTROL_ALLOW_ORIGIN_EMPTY \
- (&grpc_static_mdelem_table[6])
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[30], GRPC_MDELEM_STORAGE_STATIC))
/* "age": "" */
-#define GRPC_MDELEM_AGE_EMPTY (&grpc_static_mdelem_table[7])
+#define GRPC_MDELEM_AGE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[31], GRPC_MDELEM_STORAGE_STATIC))
/* "allow": "" */
-#define GRPC_MDELEM_ALLOW_EMPTY (&grpc_static_mdelem_table[8])
-/* ":authority": "" */
-#define GRPC_MDELEM_AUTHORITY_EMPTY (&grpc_static_mdelem_table[9])
+#define GRPC_MDELEM_ALLOW_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[32], GRPC_MDELEM_STORAGE_STATIC))
/* "authorization": "" */
-#define GRPC_MDELEM_AUTHORIZATION_EMPTY (&grpc_static_mdelem_table[10])
+#define GRPC_MDELEM_AUTHORIZATION_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[33], GRPC_MDELEM_STORAGE_STATIC))
/* "cache-control": "" */
-#define GRPC_MDELEM_CACHE_CONTROL_EMPTY (&grpc_static_mdelem_table[11])
+#define GRPC_MDELEM_CACHE_CONTROL_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[34], GRPC_MDELEM_STORAGE_STATIC))
/* "content-disposition": "" */
-#define GRPC_MDELEM_CONTENT_DISPOSITION_EMPTY (&grpc_static_mdelem_table[12])
+#define GRPC_MDELEM_CONTENT_DISPOSITION_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[35], GRPC_MDELEM_STORAGE_STATIC))
/* "content-encoding": "" */
-#define GRPC_MDELEM_CONTENT_ENCODING_EMPTY (&grpc_static_mdelem_table[13])
+#define GRPC_MDELEM_CONTENT_ENCODING_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[36], GRPC_MDELEM_STORAGE_STATIC))
/* "content-language": "" */
-#define GRPC_MDELEM_CONTENT_LANGUAGE_EMPTY (&grpc_static_mdelem_table[14])
+#define GRPC_MDELEM_CONTENT_LANGUAGE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[37], GRPC_MDELEM_STORAGE_STATIC))
/* "content-length": "" */
-#define GRPC_MDELEM_CONTENT_LENGTH_EMPTY (&grpc_static_mdelem_table[15])
+#define GRPC_MDELEM_CONTENT_LENGTH_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[38], GRPC_MDELEM_STORAGE_STATIC))
/* "content-location": "" */
-#define GRPC_MDELEM_CONTENT_LOCATION_EMPTY (&grpc_static_mdelem_table[16])
+#define GRPC_MDELEM_CONTENT_LOCATION_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[39], GRPC_MDELEM_STORAGE_STATIC))
/* "content-range": "" */
-#define GRPC_MDELEM_CONTENT_RANGE_EMPTY (&grpc_static_mdelem_table[17])
-/* "content-type": "application/grpc" */
-#define GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC \
- (&grpc_static_mdelem_table[18])
+#define GRPC_MDELEM_CONTENT_RANGE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[40], GRPC_MDELEM_STORAGE_STATIC))
/* "content-type": "" */
-#define GRPC_MDELEM_CONTENT_TYPE_EMPTY (&grpc_static_mdelem_table[19])
+#define GRPC_MDELEM_CONTENT_TYPE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[41], GRPC_MDELEM_STORAGE_STATIC))
/* "cookie": "" */
-#define GRPC_MDELEM_COOKIE_EMPTY (&grpc_static_mdelem_table[20])
+#define GRPC_MDELEM_COOKIE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[42], GRPC_MDELEM_STORAGE_STATIC))
/* "date": "" */
-#define GRPC_MDELEM_DATE_EMPTY (&grpc_static_mdelem_table[21])
+#define GRPC_MDELEM_DATE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[43], GRPC_MDELEM_STORAGE_STATIC))
/* "etag": "" */
-#define GRPC_MDELEM_ETAG_EMPTY (&grpc_static_mdelem_table[22])
+#define GRPC_MDELEM_ETAG_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[44], GRPC_MDELEM_STORAGE_STATIC))
/* "expect": "" */
-#define GRPC_MDELEM_EXPECT_EMPTY (&grpc_static_mdelem_table[23])
+#define GRPC_MDELEM_EXPECT_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[45], GRPC_MDELEM_STORAGE_STATIC))
/* "expires": "" */
-#define GRPC_MDELEM_EXPIRES_EMPTY (&grpc_static_mdelem_table[24])
+#define GRPC_MDELEM_EXPIRES_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[46], GRPC_MDELEM_STORAGE_STATIC))
/* "from": "" */
-#define GRPC_MDELEM_FROM_EMPTY (&grpc_static_mdelem_table[25])
-/* "grpc-accept-encoding": "deflate" */
-#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE (&grpc_static_mdelem_table[26])
-/* "grpc-accept-encoding": "deflate,gzip" */
-#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE_COMMA_GZIP \
- (&grpc_static_mdelem_table[27])
-/* "grpc-accept-encoding": "gzip" */
-#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_GZIP (&grpc_static_mdelem_table[28])
-/* "grpc-accept-encoding": "identity" */
-#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY \
- (&grpc_static_mdelem_table[29])
-/* "grpc-accept-encoding": "identity,deflate" */
-#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE \
- (&grpc_static_mdelem_table[30])
-/* "grpc-accept-encoding": "identity,deflate,gzip" */
-#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
- (&grpc_static_mdelem_table[31])
-/* "grpc-accept-encoding": "identity,gzip" */
-#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_GZIP \
- (&grpc_static_mdelem_table[32])
-/* "grpc-encoding": "deflate" */
-#define GRPC_MDELEM_GRPC_ENCODING_DEFLATE (&grpc_static_mdelem_table[33])
-/* "grpc-encoding": "gzip" */
-#define GRPC_MDELEM_GRPC_ENCODING_GZIP (&grpc_static_mdelem_table[34])
-/* "grpc-encoding": "identity" */
-#define GRPC_MDELEM_GRPC_ENCODING_IDENTITY (&grpc_static_mdelem_table[35])
-/* "grpc-status": "0" */
-#define GRPC_MDELEM_GRPC_STATUS_0 (&grpc_static_mdelem_table[36])
-/* "grpc-status": "1" */
-#define GRPC_MDELEM_GRPC_STATUS_1 (&grpc_static_mdelem_table[37])
-/* "grpc-status": "2" */
-#define GRPC_MDELEM_GRPC_STATUS_2 (&grpc_static_mdelem_table[38])
+#define GRPC_MDELEM_FROM_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[47], GRPC_MDELEM_STORAGE_STATIC))
/* "host": "" */
-#define GRPC_MDELEM_HOST_EMPTY (&grpc_static_mdelem_table[39])
+#define GRPC_MDELEM_HOST_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[48], GRPC_MDELEM_STORAGE_STATIC))
/* "if-match": "" */
-#define GRPC_MDELEM_IF_MATCH_EMPTY (&grpc_static_mdelem_table[40])
+#define GRPC_MDELEM_IF_MATCH_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[49], GRPC_MDELEM_STORAGE_STATIC))
/* "if-modified-since": "" */
-#define GRPC_MDELEM_IF_MODIFIED_SINCE_EMPTY (&grpc_static_mdelem_table[41])
+#define GRPC_MDELEM_IF_MODIFIED_SINCE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[50], GRPC_MDELEM_STORAGE_STATIC))
/* "if-none-match": "" */
-#define GRPC_MDELEM_IF_NONE_MATCH_EMPTY (&grpc_static_mdelem_table[42])
+#define GRPC_MDELEM_IF_NONE_MATCH_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[51], GRPC_MDELEM_STORAGE_STATIC))
/* "if-range": "" */
-#define GRPC_MDELEM_IF_RANGE_EMPTY (&grpc_static_mdelem_table[43])
+#define GRPC_MDELEM_IF_RANGE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[52], GRPC_MDELEM_STORAGE_STATIC))
/* "if-unmodified-since": "" */
-#define GRPC_MDELEM_IF_UNMODIFIED_SINCE_EMPTY (&grpc_static_mdelem_table[44])
+#define GRPC_MDELEM_IF_UNMODIFIED_SINCE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[53], GRPC_MDELEM_STORAGE_STATIC))
/* "last-modified": "" */
-#define GRPC_MDELEM_LAST_MODIFIED_EMPTY (&grpc_static_mdelem_table[45])
-/* "lb-cost-bin": "" */
-#define GRPC_MDELEM_LB_COST_BIN_EMPTY (&grpc_static_mdelem_table[46])
+#define GRPC_MDELEM_LAST_MODIFIED_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[54], GRPC_MDELEM_STORAGE_STATIC))
/* "lb-token": "" */
-#define GRPC_MDELEM_LB_TOKEN_EMPTY (&grpc_static_mdelem_table[47])
+#define GRPC_MDELEM_LB_TOKEN_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[55], GRPC_MDELEM_STORAGE_STATIC))
+/* "lb-cost-bin": "" */
+#define GRPC_MDELEM_LB_COST_BIN_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[56], GRPC_MDELEM_STORAGE_STATIC))
/* "link": "" */
-#define GRPC_MDELEM_LINK_EMPTY (&grpc_static_mdelem_table[48])
+#define GRPC_MDELEM_LINK_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[57], GRPC_MDELEM_STORAGE_STATIC))
/* "location": "" */
-#define GRPC_MDELEM_LOCATION_EMPTY (&grpc_static_mdelem_table[49])
+#define GRPC_MDELEM_LOCATION_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[58], GRPC_MDELEM_STORAGE_STATIC))
/* "max-forwards": "" */
-#define GRPC_MDELEM_MAX_FORWARDS_EMPTY (&grpc_static_mdelem_table[50])
-/* ":method": "GET" */
-#define GRPC_MDELEM_METHOD_GET (&grpc_static_mdelem_table[51])
-/* ":method": "POST" */
-#define GRPC_MDELEM_METHOD_POST (&grpc_static_mdelem_table[52])
-/* ":method": "PUT" */
-#define GRPC_MDELEM_METHOD_PUT (&grpc_static_mdelem_table[53])
-/* ":path": "/" */
-#define GRPC_MDELEM_PATH_SLASH (&grpc_static_mdelem_table[54])
-/* ":path": "/index.html" */
-#define GRPC_MDELEM_PATH_SLASH_INDEX_DOT_HTML (&grpc_static_mdelem_table[55])
+#define GRPC_MDELEM_MAX_FORWARDS_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[59], GRPC_MDELEM_STORAGE_STATIC))
/* "proxy-authenticate": "" */
-#define GRPC_MDELEM_PROXY_AUTHENTICATE_EMPTY (&grpc_static_mdelem_table[56])
+#define GRPC_MDELEM_PROXY_AUTHENTICATE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[60], GRPC_MDELEM_STORAGE_STATIC))
/* "proxy-authorization": "" */
-#define GRPC_MDELEM_PROXY_AUTHORIZATION_EMPTY (&grpc_static_mdelem_table[57])
+#define GRPC_MDELEM_PROXY_AUTHORIZATION_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[61], GRPC_MDELEM_STORAGE_STATIC))
/* "range": "" */
-#define GRPC_MDELEM_RANGE_EMPTY (&grpc_static_mdelem_table[58])
+#define GRPC_MDELEM_RANGE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[62], GRPC_MDELEM_STORAGE_STATIC))
/* "referer": "" */
-#define GRPC_MDELEM_REFERER_EMPTY (&grpc_static_mdelem_table[59])
+#define GRPC_MDELEM_REFERER_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[63], GRPC_MDELEM_STORAGE_STATIC))
/* "refresh": "" */
-#define GRPC_MDELEM_REFRESH_EMPTY (&grpc_static_mdelem_table[60])
+#define GRPC_MDELEM_REFRESH_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[64], GRPC_MDELEM_STORAGE_STATIC))
/* "retry-after": "" */
-#define GRPC_MDELEM_RETRY_AFTER_EMPTY (&grpc_static_mdelem_table[61])
-/* ":scheme": "grpc" */
-#define GRPC_MDELEM_SCHEME_GRPC (&grpc_static_mdelem_table[62])
-/* ":scheme": "http" */
-#define GRPC_MDELEM_SCHEME_HTTP (&grpc_static_mdelem_table[63])
-/* ":scheme": "https" */
-#define GRPC_MDELEM_SCHEME_HTTPS (&grpc_static_mdelem_table[64])
+#define GRPC_MDELEM_RETRY_AFTER_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[65], GRPC_MDELEM_STORAGE_STATIC))
/* "server": "" */
-#define GRPC_MDELEM_SERVER_EMPTY (&grpc_static_mdelem_table[65])
+#define GRPC_MDELEM_SERVER_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[66], GRPC_MDELEM_STORAGE_STATIC))
/* "set-cookie": "" */
-#define GRPC_MDELEM_SET_COOKIE_EMPTY (&grpc_static_mdelem_table[66])
-/* ":status": "200" */
-#define GRPC_MDELEM_STATUS_200 (&grpc_static_mdelem_table[67])
-/* ":status": "204" */
-#define GRPC_MDELEM_STATUS_204 (&grpc_static_mdelem_table[68])
-/* ":status": "206" */
-#define GRPC_MDELEM_STATUS_206 (&grpc_static_mdelem_table[69])
-/* ":status": "304" */
-#define GRPC_MDELEM_STATUS_304 (&grpc_static_mdelem_table[70])
-/* ":status": "400" */
-#define GRPC_MDELEM_STATUS_400 (&grpc_static_mdelem_table[71])
-/* ":status": "404" */
-#define GRPC_MDELEM_STATUS_404 (&grpc_static_mdelem_table[72])
-/* ":status": "500" */
-#define GRPC_MDELEM_STATUS_500 (&grpc_static_mdelem_table[73])
+#define GRPC_MDELEM_SET_COOKIE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[67], GRPC_MDELEM_STORAGE_STATIC))
/* "strict-transport-security": "" */
#define GRPC_MDELEM_STRICT_TRANSPORT_SECURITY_EMPTY \
- (&grpc_static_mdelem_table[74])
-/* "te": "trailers" */
-#define GRPC_MDELEM_TE_TRAILERS (&grpc_static_mdelem_table[75])
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[68], GRPC_MDELEM_STORAGE_STATIC))
/* "transfer-encoding": "" */
-#define GRPC_MDELEM_TRANSFER_ENCODING_EMPTY (&grpc_static_mdelem_table[76])
+#define GRPC_MDELEM_TRANSFER_ENCODING_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[69], GRPC_MDELEM_STORAGE_STATIC))
/* "user-agent": "" */
-#define GRPC_MDELEM_USER_AGENT_EMPTY (&grpc_static_mdelem_table[77])
+#define GRPC_MDELEM_USER_AGENT_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[70], GRPC_MDELEM_STORAGE_STATIC))
/* "vary": "" */
-#define GRPC_MDELEM_VARY_EMPTY (&grpc_static_mdelem_table[78])
+#define GRPC_MDELEM_VARY_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[71], GRPC_MDELEM_STORAGE_STATIC))
/* "via": "" */
-#define GRPC_MDELEM_VIA_EMPTY (&grpc_static_mdelem_table[79])
+#define GRPC_MDELEM_VIA_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[72], GRPC_MDELEM_STORAGE_STATIC))
/* "www-authenticate": "" */
-#define GRPC_MDELEM_WWW_AUTHENTICATE_EMPTY (&grpc_static_mdelem_table[80])
+#define GRPC_MDELEM_WWW_AUTHENTICATE_EMPTY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[73], GRPC_MDELEM_STORAGE_STATIC))
+/* "grpc-accept-encoding": "identity" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[74], GRPC_MDELEM_STORAGE_STATIC))
+/* "grpc-accept-encoding": "deflate" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[75], GRPC_MDELEM_STORAGE_STATIC))
+/* "grpc-accept-encoding": "identity,deflate" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[76], GRPC_MDELEM_STORAGE_STATIC))
+/* "grpc-accept-encoding": "gzip" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_GZIP \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[77], GRPC_MDELEM_STORAGE_STATIC))
+/* "grpc-accept-encoding": "identity,gzip" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_GZIP \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[78], GRPC_MDELEM_STORAGE_STATIC))
+/* "grpc-accept-encoding": "deflate,gzip" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE_COMMA_GZIP \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[79], GRPC_MDELEM_STORAGE_STATIC))
+/* "grpc-accept-encoding": "identity,deflate,gzip" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[80], GRPC_MDELEM_STORAGE_STATIC))
+
+grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b);
+typedef enum {
+ GRPC_BATCH_PATH,
+ GRPC_BATCH_METHOD,
+ GRPC_BATCH_STATUS,
+ GRPC_BATCH_AUTHORITY,
+ GRPC_BATCH_SCHEME,
+ GRPC_BATCH_TE,
+ GRPC_BATCH_GRPC_MESSAGE,
+ GRPC_BATCH_GRPC_STATUS,
+ GRPC_BATCH_GRPC_PAYLOAD_BIN,
+ GRPC_BATCH_GRPC_ENCODING,
+ GRPC_BATCH_GRPC_ACCEPT_ENCODING,
+ GRPC_BATCH_CONTENT_TYPE,
+ GRPC_BATCH_GRPC_INTERNAL_ENCODING_REQUEST,
+ GRPC_BATCH_USER_AGENT,
+ GRPC_BATCH_HOST,
+ GRPC_BATCH_LB_TOKEN,
+ GRPC_BATCH_LB_COST_BIN,
+ GRPC_BATCH_CALLOUTS_COUNT
+} grpc_metadata_batch_callouts_index;
+
+typedef union {
+ struct grpc_linked_mdelem *array[GRPC_BATCH_CALLOUTS_COUNT];
+ struct {
+ struct grpc_linked_mdelem *path;
+ struct grpc_linked_mdelem *method;
+ struct grpc_linked_mdelem *status;
+ struct grpc_linked_mdelem *authority;
+ struct grpc_linked_mdelem *scheme;
+ struct grpc_linked_mdelem *te;
+ struct grpc_linked_mdelem *grpc_message;
+ struct grpc_linked_mdelem *grpc_status;
+ struct grpc_linked_mdelem *grpc_payload_bin;
+ struct grpc_linked_mdelem *grpc_encoding;
+ struct grpc_linked_mdelem *grpc_accept_encoding;
+ struct grpc_linked_mdelem *content_type;
+ struct grpc_linked_mdelem *grpc_internal_encoding_request;
+ struct grpc_linked_mdelem *user_agent;
+ struct grpc_linked_mdelem *host;
+ struct grpc_linked_mdelem *lb_token;
+ struct grpc_linked_mdelem *lb_cost_bin;
+ } named;
+} grpc_metadata_batch_callouts;
+
+#define GRPC_BATCH_INDEX_OF(slice) \
+ (GRPC_IS_STATIC_METADATA_STRING((slice)) \
+ ? (grpc_metadata_batch_callouts_index)GPR_CLAMP( \
+ GRPC_STATIC_METADATA_INDEX((slice)), 0, \
+ GRPC_BATCH_CALLOUTS_COUNT) \
+ : GRPC_BATCH_CALLOUTS_COUNT)
-extern const uint8_t
- grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT * 2];
-extern const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT];
extern const uint8_t grpc_static_accept_encoding_metadata[8];
-#define GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs) \
- (&grpc_static_mdelem_table[grpc_static_accept_encoding_metadata[(algs)]])
+#define GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs) \
+ (GRPC_MAKE_MDELEM( \
+ &grpc_static_mdelem_table[grpc_static_accept_encoding_metadata[(algs)]], \
+ GRPC_MDELEM_STORAGE_STATIC))
#endif /* GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H */
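
A minimal usage sketch of the regenerated static-metadata layer above (illustrative only; it assumes grpc_init() has run and that GRPC_MDSTR_TE is one of the callout slices defined earlier in this header, outside the hunk shown):

    #include "src/core/lib/transport/static_metadata.h"

    static void static_metadata_sketch(void) {
      /* Static strings are now plain grpc_slice values backed by a shared
         static refcount, so identity checks reduce to pointer arithmetic. */
      grpc_slice te = GRPC_MDSTR_TE;
      if (GRPC_IS_STATIC_METADATA_STRING(te)) {
        int idx = GRPC_STATIC_METADATA_INDEX(te); /* offset into the table */
        grpc_metadata_batch_callouts_index c = GRPC_BATCH_INDEX_OF(te);
        (void)idx;
        (void)c; /* the callout slot this key occupies, if any */
      }
      /* Static elements are boxed grpc_mdelem values tagged with
         GRPC_MDELEM_STORAGE_STATIC instead of bare table pointers. */
      grpc_mdelem te_trailers = GRPC_MDELEM_TE_TRAILERS;
      (void)te_trailers;
    }
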
diff --git a/src/core/ext/transport/chttp2/transport/status_conversion.c b/src/core/lib/transport/status_conversion.c
index 5dce2f2d0c..af0ac89db7 100644
--- a/src/core/ext/transport/chttp2/transport/status_conversion.c
+++ b/src/core/lib/transport/status_conversion.c
@@ -31,51 +31,51 @@
*
*/
-#include "src/core/ext/transport/chttp2/transport/status_conversion.h"
+#include "src/core/lib/transport/status_conversion.h"
-int grpc_chttp2_grpc_status_to_http2_error(grpc_status_code status) {
+int grpc_status_to_http2_error(grpc_status_code status) {
switch (status) {
case GRPC_STATUS_OK:
- return GRPC_CHTTP2_NO_ERROR;
+ return GRPC_HTTP2_NO_ERROR;
case GRPC_STATUS_CANCELLED:
- return GRPC_CHTTP2_CANCEL;
+ return GRPC_HTTP2_CANCEL;
case GRPC_STATUS_DEADLINE_EXCEEDED:
- return GRPC_CHTTP2_CANCEL;
+ return GRPC_HTTP2_CANCEL;
case GRPC_STATUS_RESOURCE_EXHAUSTED:
- return GRPC_CHTTP2_ENHANCE_YOUR_CALM;
+ return GRPC_HTTP2_ENHANCE_YOUR_CALM;
case GRPC_STATUS_PERMISSION_DENIED:
- return GRPC_CHTTP2_INADEQUATE_SECURITY;
+ return GRPC_HTTP2_INADEQUATE_SECURITY;
case GRPC_STATUS_UNAVAILABLE:
- return GRPC_CHTTP2_REFUSED_STREAM;
+ return GRPC_HTTP2_REFUSED_STREAM;
default:
- return GRPC_CHTTP2_INTERNAL_ERROR;
+ return GRPC_HTTP2_INTERNAL_ERROR;
}
}
-grpc_status_code grpc_chttp2_http2_error_to_grpc_status(
- grpc_chttp2_error_code error, gpr_timespec deadline) {
+grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error,
+ gpr_timespec deadline) {
switch (error) {
- case GRPC_CHTTP2_NO_ERROR:
+ case GRPC_HTTP2_NO_ERROR:
/* should never be received */
return GRPC_STATUS_INTERNAL;
- case GRPC_CHTTP2_CANCEL:
+ case GRPC_HTTP2_CANCEL:
/* http2 cancel translates to STATUS_CANCELLED iff deadline hasn't been
* exceeded */
return gpr_time_cmp(gpr_now(deadline.clock_type), deadline) >= 0
? GRPC_STATUS_DEADLINE_EXCEEDED
: GRPC_STATUS_CANCELLED;
- case GRPC_CHTTP2_ENHANCE_YOUR_CALM:
+ case GRPC_HTTP2_ENHANCE_YOUR_CALM:
return GRPC_STATUS_RESOURCE_EXHAUSTED;
- case GRPC_CHTTP2_INADEQUATE_SECURITY:
+ case GRPC_HTTP2_INADEQUATE_SECURITY:
return GRPC_STATUS_PERMISSION_DENIED;
- case GRPC_CHTTP2_REFUSED_STREAM:
+ case GRPC_HTTP2_REFUSED_STREAM:
return GRPC_STATUS_UNAVAILABLE;
default:
return GRPC_STATUS_INTERNAL;
}
}
-grpc_status_code grpc_chttp2_http2_status_to_grpc_status(int status) {
+grpc_status_code grpc_http2_status_to_grpc_status(int status) {
switch (status) {
/* these HTTP2 status codes are called out explicitly in status.proto */
case 200:
@@ -110,6 +110,4 @@ grpc_status_code grpc_chttp2_http2_status_to_grpc_status(int status) {
}
}
-int grpc_chttp2_grpc_status_to_http2_status(grpc_status_code status) {
- return 200;
-}
+int grpc_status_to_http2_status(grpc_status_code status) { return 200; }
diff --git a/src/core/ext/transport/chttp2/transport/status_conversion.h b/src/core/lib/transport/status_conversion.h
index 953bc9f1e1..e6a23a606b 100644
--- a/src/core/ext/transport/chttp2/transport/status_conversion.h
+++ b/src/core/lib/transport/status_conversion.h
@@ -31,20 +31,19 @@
*
*/
-#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_STATUS_CONVERSION_H
-#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_STATUS_CONVERSION_H
+#ifndef GRPC_CORE_LIB_TRANSPORT_STATUS_CONVERSION_H
+#define GRPC_CORE_LIB_TRANSPORT_STATUS_CONVERSION_H
#include <grpc/grpc.h>
-#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
+#include "src/core/lib/transport/http2_errors.h"
/* Conversion of grpc status codes to http2 error codes (for RST_STREAM) */
-grpc_chttp2_error_code grpc_chttp2_grpc_status_to_http2_error(
- grpc_status_code status);
-grpc_status_code grpc_chttp2_http2_error_to_grpc_status(
- grpc_chttp2_error_code error, gpr_timespec deadline);
+grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status);
+grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error,
+ gpr_timespec deadline);
/* Conversion of HTTP status codes (:status) to grpc status codes */
-grpc_status_code grpc_chttp2_http2_status_to_grpc_status(int status);
-int grpc_chttp2_grpc_status_to_http2_status(grpc_status_code status);
+grpc_status_code grpc_http2_status_to_grpc_status(int status);
+int grpc_status_to_http2_status(grpc_status_code status);
-#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_STATUS_CONVERSION_H */
+#endif /* GRPC_CORE_LIB_TRANSPORT_STATUS_CONVERSION_H */
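
The rename drops the chttp2-specific prefix so these conversions can be used by any transport. A small usage sketch of the renamed functions (illustrative):

    #include "src/core/lib/transport/status_conversion.h"

    static void status_conversion_sketch(gpr_timespec deadline) {
      /* grpc -> HTTP/2, e.g. when emitting RST_STREAM:
         GRPC_STATUS_UNAVAILABLE maps to GRPC_HTTP2_REFUSED_STREAM above. */
      grpc_http2_error_code http2_err =
          grpc_status_to_http2_error(GRPC_STATUS_UNAVAILABLE);

      /* HTTP/2 -> grpc: CANCEL becomes DEADLINE_EXCEEDED only when the
         deadline has already passed, otherwise CANCELLED. */
      grpc_status_code s =
          grpc_http2_error_to_grpc_status(GRPC_HTTP2_CANCEL, deadline);

      /* raw :status code -> grpc status code */
      grpc_status_code from_http = grpc_http2_status_to_grpc_status(404);

      (void)http2_err; (void)s; (void)from_http;
    }
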
diff --git a/src/core/lib/transport/timeout_encoding.c b/src/core/lib/transport/timeout_encoding.c
index b58ebbd0a8..0d4d7e5a7e 100644
--- a/src/core/lib/transport/timeout_encoding.c
+++ b/src/core/lib/transport/timeout_encoding.c
@@ -131,20 +131,21 @@ void grpc_http2_encode_timeout(gpr_timespec timeout, char *buffer) {
}
}
-static int is_all_whitespace(const char *p) {
- while (*p == ' ') p++;
- return *p == 0;
+static int is_all_whitespace(const char *p, const char *end) {
+ while (p != end && *p == ' ') p++;
+ return p == end;
}
-int grpc_http2_decode_timeout(const char *buffer, gpr_timespec *timeout) {
+int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout) {
int32_t x = 0;
- const uint8_t *p = (const uint8_t *)buffer;
+ const uint8_t *p = GRPC_SLICE_START_PTR(text);
+ const uint8_t *end = GRPC_SLICE_END_PTR(text);
int have_digit = 0;
/* skip whitespace */
- for (; *p == ' '; p++)
+ for (; p != end && *p == ' '; p++)
;
/* decode numeric part */
- for (; *p >= '0' && *p <= '9'; p++) {
+ for (; p != end && *p >= '0' && *p <= '9'; p++) {
int32_t digit = (int32_t)(*p - (uint8_t)'0');
have_digit = 1;
/* spec allows max. 8 digits, but we allow values up to 1,000,000,000 */
@@ -158,8 +159,9 @@ int grpc_http2_decode_timeout(const char *buffer, gpr_timespec *timeout) {
}
if (!have_digit) return 0;
/* skip whitespace */
- for (; *p == ' '; p++)
+ for (; p != end && *p == ' '; p++)
;
+ if (p == end) return 0;
/* decode unit specifier */
switch (*p) {
case 'n':
@@ -184,5 +186,5 @@ int grpc_http2_decode_timeout(const char *buffer, gpr_timespec *timeout) {
return 0;
}
p++;
- return is_all_whitespace((const char *)p);
+ return is_all_whitespace((const char *)p, (const char *)end);
}
diff --git a/src/core/lib/transport/timeout_encoding.h b/src/core/lib/transport/timeout_encoding.h
index 92f02f6ecd..4c8025d800 100644
--- a/src/core/lib/transport/timeout_encoding.h
+++ b/src/core/lib/transport/timeout_encoding.h
@@ -34,7 +34,9 @@
#ifndef GRPC_CORE_LIB_TRANSPORT_TIMEOUT_ENCODING_H
#define GRPC_CORE_LIB_TRANSPORT_TIMEOUT_ENCODING_H
+#include <grpc/slice.h>
#include <grpc/support/time.h>
+
#include "src/core/lib/support/string.h"
#define GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE (GPR_LTOA_MIN_BUFSIZE + 1)
@@ -42,6 +44,6 @@
/* Encode/decode timeouts to the GRPC over HTTP/2 format;
encoding may round up arbitrarily */
void grpc_http2_encode_timeout(gpr_timespec timeout, char *buffer);
-int grpc_http2_decode_timeout(const char *buffer, gpr_timespec *timeout);
+int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout);
#endif /* GRPC_CORE_LIB_TRANSPORT_TIMEOUT_ENCODING_H */
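
With the decoder taking a grpc_slice, grpc-timeout header values can be parsed straight out of incoming metadata without making a NUL-terminated copy. Sketch (illustrative):

    #include <grpc/slice.h>
    #include <grpc/support/time.h>
    #include "src/core/lib/transport/timeout_encoding.h"

    static void timeout_sketch(void) {
      /* encode: buffer must hold GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE bytes */
      char buf[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
      grpc_http2_encode_timeout(gpr_time_from_seconds(30, GPR_TIMESPAN), buf);

      /* decode: parsing is bounded by the slice length, no terminator needed */
      grpc_slice text = grpc_slice_from_static_string("30S");
      gpr_timespec parsed;
      if (grpc_http2_decode_timeout(text, &parsed)) {
        /* parsed now holds roughly 30 seconds */
      }
      grpc_slice_unref(text);
    }
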
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index 055edbb39f..004e748f25 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -40,6 +40,7 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
+#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
@@ -69,6 +70,16 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount) {
#endif
if (gpr_unref(&refcount->refs)) {
+ if (exec_ctx->flags & GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP) {
+ /* Ick.
+ The thread we're running on MAY be owned (indirectly) by a call-stack.
+ If that's the case, destroying the call-stack MAY try to destroy the
+ thread, which is a tangled mess that we just don't want to ever have to
+ cope with.
+ Throw this over to the executor (on a core-owned thread) and process it
+ there. */
+ refcount->destroy.scheduler = grpc_executor_scheduler;
+ }
grpc_closure_sched(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
}
}
@@ -173,93 +184,7 @@ void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
grpc_closure_sched(exec_ctx, op->recv_initial_metadata_ready,
GRPC_ERROR_REF(error));
grpc_closure_sched(exec_ctx, op->on_complete, error);
-}
-
-typedef struct {
- grpc_error *error;
- grpc_closure *then_call;
- grpc_closure closure;
-} close_message_data;
-
-static void free_message(grpc_exec_ctx *exec_ctx, void *p, grpc_error *error) {
- close_message_data *cmd = p;
- GRPC_ERROR_UNREF(cmd->error);
- if (cmd->then_call != NULL) {
- cmd->then_call->cb(exec_ctx, cmd->then_call->cb_arg, error);
- }
- gpr_free(cmd);
-}
-
-static void add_error(grpc_transport_stream_op *op, grpc_error **which,
- grpc_error *error) {
- close_message_data *cmd;
- cmd = gpr_malloc(sizeof(*cmd));
- cmd->error = error;
- cmd->then_call = op->on_complete;
- grpc_closure_init(&cmd->closure, free_message, cmd,
- grpc_schedule_on_exec_ctx);
- op->on_complete = &cmd->closure;
- *which = error;
-}
-
-void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
- grpc_status_code status) {
- GPR_ASSERT(status != GRPC_STATUS_OK);
- if (op->cancel_error == GRPC_ERROR_NONE) {
- op->cancel_error = grpc_error_set_int(GRPC_ERROR_CANCELLED,
- GRPC_ERROR_INT_GRPC_STATUS, status);
- op->close_error = GRPC_ERROR_NONE;
- }
-}
-
-void grpc_transport_stream_op_add_cancellation_with_message(
- grpc_exec_ctx *exec_ctx, grpc_transport_stream_op *op,
- grpc_status_code status, grpc_slice *optional_message) {
- GPR_ASSERT(status != GRPC_STATUS_OK);
- if (op->cancel_error != GRPC_ERROR_NONE) {
- if (optional_message) {
- grpc_slice_unref_internal(exec_ctx, *optional_message);
- }
- return;
- }
- grpc_error *error;
- if (optional_message != NULL) {
- char *msg = grpc_dump_slice(*optional_message, GPR_DUMP_ASCII);
- error = grpc_error_set_str(GRPC_ERROR_CREATE(msg),
- GRPC_ERROR_STR_GRPC_MESSAGE, msg);
- gpr_free(msg);
- grpc_slice_unref_internal(exec_ctx, *optional_message);
- } else {
- error = GRPC_ERROR_CREATE("Call cancelled");
- }
- error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, status);
- add_error(op, &op->cancel_error, error);
-}
-
-void grpc_transport_stream_op_add_close(grpc_exec_ctx *exec_ctx,
- grpc_transport_stream_op *op,
- grpc_status_code status,
- grpc_slice *optional_message) {
- GPR_ASSERT(status != GRPC_STATUS_OK);
- if (op->cancel_error != GRPC_ERROR_NONE ||
- op->close_error != GRPC_ERROR_NONE) {
- if (optional_message) {
- grpc_slice_unref_internal(exec_ctx, *optional_message);
- }
- return;
- }
- grpc_error *error;
- if (optional_message != NULL) {
- char *msg = grpc_dump_slice(*optional_message, GPR_DUMP_ASCII);
- error = grpc_error_set_str(GRPC_ERROR_CREATE(msg),
- GRPC_ERROR_STR_GRPC_MESSAGE, msg);
- gpr_free(msg);
- grpc_slice_unref_internal(exec_ctx, *optional_message);
- } else {
- error = GRPC_ERROR_CREATE("Call force closed");
- }
- error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, status);
- add_error(op, &op->close_error, error);
+ GRPC_ERROR_UNREF(op->cancel_error);
}
typedef struct {
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index d1281830aa..9a0abe1ca4 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -150,13 +150,18 @@ typedef struct grpc_transport_stream_op {
/** Collect any stats into provided buffer, zero internal stat counters */
grpc_transport_stream_stats *collect_stats;
- /** If != GRPC_ERROR_NONE, cancel this stream */
+ /** If != GRPC_ERROR_NONE, forcefully close this stream.
+ The HTTP2 semantics should be:
+ - server side: if cancel_error has GRPC_ERROR_INT_GRPC_STATUS, and
+ trailing metadata has not been sent, send trailing metadata with status
+ and message from cancel_error (use grpc_error_get_status) followed by
+        a RST_STREAM with error=GRPC_HTTP2_NO_ERROR to force a full close
+      - at all other times: use grpc_error_get_status to get a status code, and
+        convert it to an HTTP2 error code using
+        grpc_status_to_http2_error. Send a RST_STREAM with this
+        error. */
grpc_error *cancel_error;
- /** If != GRPC_ERROR_NONE, send grpc-status, grpc-message, and close this
- stream for both reading and writing */
- grpc_error *close_error;
-
/* Indexes correspond to grpc_context_index enum values */
grpc_call_context_element *context;
@@ -176,13 +181,8 @@ typedef struct grpc_transport_op {
grpc_connectivity_state *connectivity_state;
/** should the transport be disconnected */
grpc_error *disconnect_with_error;
- /** should we send a goaway?
- after a goaway is sent, once there are no more active calls on
- the transport, the transport should disconnect */
- bool send_goaway;
/** what should the goaway contain? */
- grpc_status_code goaway_status;
- grpc_slice *goaway_message;
+ grpc_error *goaway_error;
/** set the callback for accepting new streams;
this is a permanent callback, unlike the other one-shot closures.
If true, the callback is set to set_accept_stream_fn, with its
@@ -245,18 +245,6 @@ void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *op,
grpc_error *error);
-void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
- grpc_status_code status);
-
-void grpc_transport_stream_op_add_cancellation_with_message(
- grpc_exec_ctx *exec_ctx, grpc_transport_stream_op *op,
- grpc_status_code status, grpc_slice *optional_message);
-
-void grpc_transport_stream_op_add_close(grpc_exec_ctx *exec_ctx,
- grpc_transport_stream_op *op,
- grpc_status_code status,
- grpc_slice *optional_message);
-
char *grpc_transport_stream_op_string(grpc_transport_stream_op *op);
char *grpc_transport_op_string(grpc_transport_op *op);
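
Since the add_cancellation/add_close helpers are removed, callers fill in cancel_error directly and the transport derives both the grpc-status and the wire-level close from that single error. A minimal sketch of the caller side, mirroring the removed helper (illustrative):

    #include "src/core/lib/iomgr/error.h"
    #include "src/core/lib/transport/transport.h"

    /* The caller owns `op`; the transport consumes cancel_error when the op
       is performed. */
    static void sketch_mark_cancelled(grpc_transport_stream_op *op,
                                      grpc_status_code status) {
      if (op->cancel_error == GRPC_ERROR_NONE) {
        op->cancel_error = grpc_error_set_int(
            GRPC_ERROR_CREATE("Cancelled"), GRPC_ERROR_INT_GRPC_STATUS, status);
      }
    }
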
diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c
index 58d6ad508e..28360e3784 100644
--- a/src/core/lib/transport/transport_op_string.c
+++ b/src/core/lib/transport/transport_op_string.c
@@ -47,14 +47,14 @@
/* These routines are here to facilitate debugging - they produce string
representations of various transport data structures */
-static void put_metadata(gpr_strvec *b, grpc_mdelem *md) {
+static void put_metadata(gpr_strvec *b, grpc_mdelem md) {
gpr_strvec_add(b, gpr_strdup("key="));
gpr_strvec_add(
- b, grpc_dump_slice(md->key->slice, GPR_DUMP_HEX | GPR_DUMP_ASCII));
+ b, grpc_dump_slice(GRPC_MDKEY(md), GPR_DUMP_HEX | GPR_DUMP_ASCII));
gpr_strvec_add(b, gpr_strdup(" value="));
gpr_strvec_add(
- b, grpc_dump_slice(md->value->slice, GPR_DUMP_HEX | GPR_DUMP_ASCII));
+ b, grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX | GPR_DUMP_ASCII));
}
static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
@@ -121,15 +121,7 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
gpr_strvec_add(&b, gpr_strdup(" "));
const char *msg = grpc_error_string(op->cancel_error);
gpr_asprintf(&tmp, "CANCEL:%s", msg);
- grpc_error_free_string(msg);
- gpr_strvec_add(&b, tmp);
- }
- if (op->close_error != GRPC_ERROR_NONE) {
- gpr_strvec_add(&b, gpr_strdup(" "));
- const char *msg = grpc_error_string(op->close_error);
- gpr_asprintf(&tmp, "CLOSE:%s", msg);
- grpc_error_free_string(msg);
gpr_strvec_add(&b, tmp);
}
@@ -168,18 +160,14 @@ char *grpc_transport_op_string(grpc_transport_op *op) {
const char *err = grpc_error_string(op->disconnect_with_error);
gpr_asprintf(&tmp, "DISCONNECT:%s", err);
gpr_strvec_add(&b, tmp);
- grpc_error_free_string(err);
}
- if (op->send_goaway) {
+ if (op->goaway_error) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
- char *msg = op->goaway_message == NULL
- ? "null"
- : grpc_dump_slice(*op->goaway_message,
- GPR_DUMP_ASCII | GPR_DUMP_HEX);
- gpr_asprintf(&tmp, "SEND_GOAWAY:status=%d:msg=%s", op->goaway_status, msg);
- if (op->goaway_message != NULL) gpr_free(msg);
+ const char *msg = grpc_error_string(op->goaway_error);
+ gpr_asprintf(&tmp, "SEND_GOAWAY:%s", msg);
+
gpr_strvec_add(&b, tmp);
}
diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc
index 357d8317ad..c985183ae7 100644
--- a/src/cpp/client/channel_cc.cc
+++ b/src/cpp/client/channel_cc.cc
@@ -107,10 +107,20 @@ Call Channel::CreateCall(const RpcMethod& method, ClientContext* context,
} else if (!host_.empty()) {
host_str = host_.c_str();
}
- c_call = grpc_channel_create_call(c_channel_, context->propagate_from_call_,
- context->propagation_options_.c_bitmask(),
- cq->cq(), method.name(), host_str,
- context->raw_deadline(), nullptr);
+ grpc_slice method_slice = SliceFromCopiedString(method.name());
+ grpc_slice host_slice;
+ if (host_str != nullptr) {
+ host_slice = SliceFromCopiedString(host_str);
+ }
+ c_call = grpc_channel_create_call(
+ c_channel_, context->propagate_from_call_,
+ context->propagation_options_.c_bitmask(), cq->cq(), method_slice,
+ host_str == nullptr ? nullptr : &host_slice, context->raw_deadline(),
+ nullptr);
+ grpc_slice_unref(method_slice);
+ if (host_str != nullptr) {
+ grpc_slice_unref(host_slice);
+ }
}
grpc_census_call_set_context(c_call, context->census_context());
context->set_call(c_call, shared_from_this());
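
The underlying C API now takes the method and host as slices rather than C strings; the C++ wrapper above copies its std::strings into slices and unrefs them once the call is created. The equivalent core-level call looks roughly like this (illustrative; chan and cq are assumed to exist, and the method/host values are hypothetical):

    #include <grpc/grpc.h>
    #include <grpc/slice.h>

    static grpc_call *create_call_sketch(grpc_channel *chan,
                                         grpc_completion_queue *cq) {
      grpc_slice method = grpc_slice_from_static_string("/pkg.Service/Method");
      grpc_slice host = grpc_slice_from_static_string("example.com");
      grpc_call *call = grpc_channel_create_call(
          chan, NULL /* parent call */, GRPC_PROPAGATE_DEFAULTS, cq, method,
          &host /* or NULL to use the channel's default authority */,
          gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
      grpc_slice_unref(method);
      grpc_slice_unref(host);
      return call;
    }
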
diff --git a/src/cpp/client/secure_credentials.cc b/src/cpp/client/secure_credentials.cc
index 269c523bba..25f6bab7f2 100644
--- a/src/cpp/client/secure_credentials.cc
+++ b/src/cpp/client/secure_credentials.cc
@@ -206,15 +206,18 @@ void MetadataCredentialsPluginWrapper::InvokePlugin(
std::vector<grpc_metadata> md;
for (auto it = metadata.begin(); it != metadata.end(); ++it) {
grpc_metadata md_entry;
- md_entry.key = it->first.c_str();
- md_entry.value = it->second.data();
- md_entry.value_length = it->second.size();
+ md_entry.key = SliceFromCopiedString(it->first);
+ md_entry.value = SliceFromCopiedString(it->second);
md_entry.flags = 0;
md.push_back(md_entry);
}
cb(user_data, md.empty() ? nullptr : &md[0], md.size(),
static_cast<grpc_status_code>(status.error_code()),
status.error_message().c_str());
+ for (auto it = md.begin(); it != md.end(); ++it) {
+ grpc_slice_unref(it->key);
+ grpc_slice_unref(it->value);
+ }
}
MetadataCredentialsPluginWrapper::MetadataCredentialsPluginWrapper(
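
grpc_metadata entries now carry their key and value as slices instead of char pointer plus length pairs, which is why the plugin wrapper above copies each string into a slice and unrefs it after invoking the callback. Sketch of the new layout (illustrative; the header name and value are hypothetical):

    #include <grpc/grpc.h>
    #include <grpc/slice.h>

    static void metadata_entry_sketch(void) {
      grpc_metadata md_entry;
      md_entry.key = grpc_slice_from_copied_string("x-example-key");
      md_entry.value = grpc_slice_from_copied_string("example-value");
      md_entry.flags = 0;
      /* ... hand md_entry to a callback or batch operation ... */
      grpc_slice_unref(md_entry.key);
      grpc_slice_unref(md_entry.value);
    }
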
diff --git a/src/cpp/client/secure_credentials.h b/src/cpp/client/secure_credentials.h
index 281db17e98..713654ad5b 100644
--- a/src/cpp/client/secure_credentials.h
+++ b/src/cpp/client/secure_credentials.h
@@ -70,7 +70,7 @@ class SecureCallCredentials final : public CallCredentials {
grpc_call_credentials* const c_creds_;
};
-class MetadataCredentialsPluginWrapper final {
+class MetadataCredentialsPluginWrapper final : private GrpcLibraryCodegen {
public:
static void Destroy(void* wrapper);
static void GetMetadata(void* wrapper, grpc_auth_metadata_context context,
diff --git a/src/cpp/common/channel_arguments.cc b/src/cpp/common/channel_arguments.cc
index 1fdd106130..65f3277499 100644
--- a/src/cpp/common/channel_arguments.cc
+++ b/src/cpp/common/channel_arguments.cc
@@ -143,6 +143,14 @@ void ChannelArguments::SetResourceQuota(
grpc_resource_quota_arg_vtable());
}
+void ChannelArguments::SetMaxReceiveMessageSize(int size) {
+ SetInt(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, size);
+}
+
+void ChannelArguments::SetMaxSendMessageSize(int size) {
+ SetInt(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, size);
+}
+
void ChannelArguments::SetLoadBalancingPolicyName(
const grpc::string& lb_policy_name) {
SetString(GRPC_ARG_LB_POLICY_NAME, lb_policy_name);
diff --git a/src/cpp/common/channel_filter.cc b/src/cpp/common/channel_filter.cc
index c0dc9dd63e..253614ca9b 100644
--- a/src/cpp/common/channel_filter.cc
+++ b/src/cpp/common/channel_filter.cc
@@ -36,6 +36,8 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/cpp/common/channel_filter.h"
+#include <grpc++/impl/codegen/slice.h>
+
namespace grpc {
// MetadataBatch
@@ -45,8 +47,10 @@ grpc_linked_mdelem *MetadataBatch::AddMetadata(grpc_exec_ctx *exec_ctx,
const string &value) {
grpc_linked_mdelem *storage = new grpc_linked_mdelem;
memset(storage, 0, sizeof(grpc_linked_mdelem));
- storage->md = grpc_mdelem_from_strings(exec_ctx, key.c_str(), value.c_str());
- grpc_metadata_batch_link_head(batch_, storage);
+ storage->md = grpc_mdelem_from_slices(exec_ctx, SliceFromCopiedString(key),
+ SliceFromCopiedString(value));
+ GRPC_LOG_IF_ERROR("MetadataBatch::AddMetadata",
+ grpc_metadata_batch_link_head(exec_ctx, batch_, storage));
return storage;
}
diff --git a/src/cpp/common/channel_filter.h b/src/cpp/common/channel_filter.h
index 5de8f5e463..5f9fd8790b 100644
--- a/src/cpp/common/channel_filter.h
+++ b/src/cpp/common/channel_filter.h
@@ -76,8 +76,8 @@ class MetadataBatch {
class const_iterator : public std::iterator<std::bidirectional_iterator_tag,
const grpc_mdelem> {
public:
- const grpc_mdelem &operator*() const { return *elem_->md; }
- const grpc_mdelem *operator->() const { return elem_->md; }
+ const grpc_mdelem &operator*() const { return elem_->md; }
+ const grpc_mdelem operator->() const { return elem_->md; }
const_iterator &operator++() {
elem_ = elem_->next;
@@ -133,7 +133,7 @@ class TransportOp {
grpc_error *disconnect_with_error() const {
return op_->disconnect_with_error;
}
- bool send_goaway() const { return op_->send_goaway; }
+ bool send_goaway() const { return op_->goaway_error != GRPC_ERROR_NONE; }
// TODO(roth): Add methods for additional fields as needed.
diff --git a/src/cpp/common/core_codegen.cc b/src/cpp/common/core_codegen.cc
index a07ad54376..36e4c89354 100644
--- a/src/cpp/common/core_codegen.cc
+++ b/src/cpp/common/core_codegen.cc
@@ -123,6 +123,17 @@ grpc_slice CoreCodegen::grpc_slice_split_tail(grpc_slice* s, size_t split) {
return ::grpc_slice_split_tail(s, split);
}
+grpc_slice CoreCodegen::grpc_slice_from_static_buffer(const void* buffer,
+ size_t length) {
+ return ::grpc_slice_from_static_buffer(buffer, length);
+}
+
+grpc_slice CoreCodegen::grpc_slice_from_copied_buffer(const void* buffer,
+ size_t length) {
+ return ::grpc_slice_from_copied_buffer(static_cast<const char*>(buffer),
+ length);
+}
+
void CoreCodegen::grpc_slice_buffer_add(grpc_slice_buffer* sb,
grpc_slice slice) {
::grpc_slice_buffer_add(sb, slice);
@@ -152,8 +163,10 @@ gpr_timespec CoreCodegen::gpr_time_0(gpr_clock_type type) {
return ::gpr_time_0(type);
}
-void CoreCodegen::assert_fail(const char* failed_assertion) {
- gpr_log(GPR_ERROR, "assertion failed: %s", failed_assertion);
+void CoreCodegen::assert_fail(const char* failed_assertion, const char* file,
+ int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_ERROR, "assertion failed: %s",
+ failed_assertion);
abort();
}
diff --git a/src/cpp/server/dynamic_thread_pool.cc b/src/cpp/server/dynamic_thread_pool.cc
index 1fdc2edb25..afb5beaade 100644
--- a/src/cpp/server/dynamic_thread_pool.cc
+++ b/src/cpp/server/dynamic_thread_pool.cc
@@ -31,12 +31,15 @@
*
*/
+#include "src/cpp/server/dynamic_thread_pool.h"
+
#include <mutex>
#include <thread>
-#include "src/cpp/server/dynamic_thread_pool.h"
+#include <grpc/support/log.h>
namespace grpc {
+
DynamicThreadPool::DynamicThread::DynamicThread(DynamicThreadPool* pool)
: pool_(pool),
thd_(new std::thread(&DynamicThreadPool::DynamicThread::ThreadFunc,
diff --git a/src/cpp/server/secure_server_credentials.cc b/src/cpp/server/secure_server_credentials.cc
index 33bdc2a1f4..10f662c77d 100644
--- a/src/cpp/server/secure_server_credentials.cc
+++ b/src/cpp/server/secure_server_credentials.cc
@@ -35,11 +35,12 @@
#include <map>
#include <memory>
+#include <grpc++/impl/codegen/slice.h>
+#include <grpc++/security/auth_metadata_processor.h>
+
#include "src/cpp/common/secure_auth_context.h"
#include "src/cpp/server/secure_server_credentials.h"
-#include <grpc++/security/auth_metadata_processor.h>
-
namespace grpc {
void AuthMetadataProcessorAyncWrapper::Destroy(void* wrapper) {
@@ -71,8 +72,8 @@ void AuthMetadataProcessorAyncWrapper::InvokeProcessor(
grpc_process_auth_metadata_done_cb cb, void* user_data) {
AuthMetadataProcessor::InputMetadata metadata;
for (size_t i = 0; i < num_md; i++) {
- metadata.insert(std::make_pair(
- md[i].key, grpc::string_ref(md[i].value, md[i].value_length)));
+ metadata.insert(std::make_pair(StringRefFromSlice(&md[i].key),
+ StringRefFromSlice(&md[i].value)));
}
SecureAuthContext context(ctx, false);
AuthMetadataProcessor::OutputMetadata consumed_metadata;
@@ -85,9 +86,8 @@ void AuthMetadataProcessorAyncWrapper::InvokeProcessor(
for (auto it = consumed_metadata.begin(); it != consumed_metadata.end();
++it) {
grpc_metadata md_entry;
- md_entry.key = it->first.c_str();
- md_entry.value = it->second.data();
- md_entry.value_length = it->second.size();
+ md_entry.key = SliceReferencingString(it->first);
+ md_entry.value = SliceReferencingString(it->second);
md_entry.flags = 0;
consumed_md.push_back(md_entry);
}
@@ -95,9 +95,8 @@ void AuthMetadataProcessorAyncWrapper::InvokeProcessor(
for (auto it = response_metadata.begin(); it != response_metadata.end();
++it) {
grpc_metadata md_entry;
- md_entry.key = it->first.c_str();
- md_entry.value = it->second.data();
- md_entry.value_length = it->second.size();
+ md_entry.key = SliceReferencingString(it->first);
+ md_entry.value = SliceReferencingString(it->second);
md_entry.flags = 0;
response_md.push_back(md_entry);
}
diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc
index 817d85a81c..dcc56eecbc 100644
--- a/src/cpp/server/server_cc.cc
+++ b/src/cpp/server/server_cc.cc
@@ -576,7 +576,6 @@ ServerInterface::BaseAsyncRequest::BaseAsyncRequest(
delete_on_finalize_(delete_on_finalize),
call_(nullptr) {
call_cq_->RegisterAvalanching(); // This op will trigger more ops
- memset(&initial_metadata_array_, 0, sizeof(initial_metadata_array_));
}
ServerInterface::BaseAsyncRequest::~BaseAsyncRequest() {
@@ -586,16 +585,8 @@ ServerInterface::BaseAsyncRequest::~BaseAsyncRequest() {
bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag,
bool* status) {
if (*status) {
- for (size_t i = 0; i < initial_metadata_array_.count; i++) {
- context_->client_metadata_.insert(
- std::pair<grpc::string_ref, grpc::string_ref>(
- initial_metadata_array_.metadata[i].key,
- grpc::string_ref(
- initial_metadata_array_.metadata[i].value,
- initial_metadata_array_.metadata[i].value_length)));
- }
+ context_->client_metadata_.FillMap();
}
- grpc_metadata_array_destroy(&initial_metadata_array_);
context_->set_call(call_);
context_->cq_ = call_cq_;
Call call(call_, server_, call_cq_, server_->max_receive_message_size());
@@ -621,8 +612,8 @@ void ServerInterface::RegisteredAsyncRequest::IssueRequest(
ServerCompletionQueue* notification_cq) {
grpc_server_request_registered_call(
server_->server(), registered_method, &call_, &context_->deadline_,
- &initial_metadata_array_, payload, call_cq_->cq(), notification_cq->cq(),
- this);
+ context_->client_metadata_.arr(), payload, call_cq_->cq(),
+ notification_cq->cq(), this);
}
ServerInterface::GenericAsyncRequest::GenericAsyncRequest(
@@ -635,7 +626,7 @@ ServerInterface::GenericAsyncRequest::GenericAsyncRequest(
GPR_ASSERT(notification_cq);
GPR_ASSERT(call_cq);
grpc_server_request_call(server->server(), &call_, &call_details_,
- &initial_metadata_array_, call_cq->cq(),
+ context->client_metadata_.arr(), call_cq->cq(),
notification_cq->cq(), this);
}
@@ -644,11 +635,12 @@ bool ServerInterface::GenericAsyncRequest::FinalizeResult(void** tag,
// TODO(yangg) remove the copy here.
if (*status) {
static_cast<GenericServerContext*>(context_)->method_ =
- call_details_.method;
- static_cast<GenericServerContext*>(context_)->host_ = call_details_.host;
+ StringFromCopiedSlice(call_details_.method);
+ static_cast<GenericServerContext*>(context_)->host_ =
+ StringFromCopiedSlice(call_details_.host);
}
- gpr_free(call_details_.method);
- gpr_free(call_details_.host);
+ grpc_slice_unref(call_details_.method);
+ grpc_slice_unref(call_details_.host);
return BaseAsyncRequest::FinalizeResult(tag, status);
}
diff --git a/src/cpp/server/server_context.cc b/src/cpp/server/server_context.cc
index a66ec4ac84..a7aaa25572 100644
--- a/src/cpp/server/server_context.cc
+++ b/src/cpp/server/server_context.cc
@@ -144,9 +144,10 @@ ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata* metadata,
sent_initial_metadata_(false),
compression_level_set_(false) {
for (size_t i = 0; i < metadata_count; i++) {
- client_metadata_.insert(std::pair<grpc::string_ref, grpc::string_ref>(
- metadata[i].key,
- grpc::string_ref(metadata[i].value, metadata[i].value_length)));
+ client_metadata_.map()->insert(
+ std::pair<grpc::string_ref, grpc::string_ref>(
+ StringRefFromSlice(&metadata[i].key),
+ StringRefFromSlice(&metadata[i].value)));
}
}
diff --git a/src/cpp/test/server_context_test_spouse.cc b/src/cpp/test/server_context_test_spouse.cc
index b93152eea0..b812d169a5 100644
--- a/src/cpp/test/server_context_test_spouse.cc
+++ b/src/cpp/test/server_context_test_spouse.cc
@@ -40,11 +40,12 @@ void ServerContextTestSpouse::AddClientMetadata(const grpc::string& key,
const grpc::string& value) {
client_metadata_storage_.insert(
std::pair<grpc::string, grpc::string>(key, value));
- ctx_->client_metadata_.clear();
+ ctx_->client_metadata_.map()->clear();
for (auto iter = client_metadata_storage_.begin();
iter != client_metadata_storage_.end(); ++iter) {
- ctx_->client_metadata_.insert(std::pair<grpc::string_ref, grpc::string_ref>(
- iter->first.c_str(), iter->second.c_str()));
+ ctx_->client_metadata_.map()->insert(
+ std::pair<grpc::string_ref, grpc::string_ref>(iter->first.c_str(),
+ iter->second.c_str()));
}
}
diff --git a/src/cpp/util/slice_cc.cc b/src/cpp/util/slice_cc.cc
index c05f1cf124..6efb68e123 100644
--- a/src/cpp/util/slice_cc.cc
+++ b/src/cpp/util/slice_cc.cc
@@ -35,7 +35,7 @@
namespace grpc {
-Slice::Slice() : slice_(gpr_empty_slice()) {}
+Slice::Slice() : slice_(grpc_empty_slice()) {}
Slice::~Slice() { grpc_slice_unref(slice_); }
diff --git a/src/csharp/Grpc.Core/Internal/BatchContextSafeHandle.cs b/src/csharp/Grpc.Core/Internal/BatchContextSafeHandle.cs
index 26449ee539..0e4a77be81 100644
--- a/src/csharp/Grpc.Core/Internal/BatchContextSafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/BatchContextSafeHandle.cs
@@ -71,7 +71,9 @@ namespace Grpc.Core.Internal
// Gets data of recv_status_on_client completion.
public ClientSideStatus GetReceivedStatusOnClient()
{
- string details = Marshal.PtrToStringAnsi(Native.grpcsharp_batch_context_recv_status_on_client_details(this));
+ UIntPtr detailsLength;
+ IntPtr detailsPtr = Native.grpcsharp_batch_context_recv_status_on_client_details(this, out detailsLength);
+ string details = Marshal.PtrToStringAnsi(detailsPtr, (int) detailsLength.ToUInt32());
var status = new Status(Native.grpcsharp_batch_context_recv_status_on_client_status(this), details);
IntPtr metadataArrayPtr = Native.grpcsharp_batch_context_recv_status_on_client_trailing_metadata(this);
diff --git a/src/csharp/Grpc.Core/Internal/MetadataArraySafeHandle.cs b/src/csharp/Grpc.Core/Internal/MetadataArraySafeHandle.cs
index 05dda5b148..d5b87a6c94 100644
--- a/src/csharp/Grpc.Core/Internal/MetadataArraySafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/MetadataArraySafeHandle.cs
@@ -79,9 +79,13 @@ namespace Grpc.Core.Internal
for (ulong i = 0; i < count; i++)
{
var index = new UIntPtr(i);
- string key = Marshal.PtrToStringAnsi(Native.grpcsharp_metadata_array_get_key(metadataArray, index));
- var bytes = new byte[Native.grpcsharp_metadata_array_get_value_length(metadataArray, index).ToUInt64()];
- Marshal.Copy(Native.grpcsharp_metadata_array_get_value(metadataArray, index), bytes, 0, bytes.Length);
+ UIntPtr keyLen;
+ IntPtr keyPtr = Native.grpcsharp_metadata_array_get_key(metadataArray, index, out keyLen);
+ string key = Marshal.PtrToStringAnsi(keyPtr, (int)keyLen.ToUInt32());
+ UIntPtr valueLen;
+ IntPtr valuePtr = Native.grpcsharp_metadata_array_get_value(metadataArray, index, out valueLen);
+ var bytes = new byte[valueLen.ToUInt64()];
+ Marshal.Copy(valuePtr, bytes, 0, bytes.Length);
metadata.Add(Metadata.Entry.CreateUnsafe(key, bytes));
}
return metadata;
diff --git a/src/csharp/Grpc.Core/Internal/NativeMethods.cs b/src/csharp/Grpc.Core/Internal/NativeMethods.cs
index ce38e37093..2f377071f7 100644
--- a/src/csharp/Grpc.Core/Internal/NativeMethods.cs
+++ b/src/csharp/Grpc.Core/Internal/NativeMethods.cs
@@ -128,7 +128,6 @@ namespace Grpc.Core.Internal
public readonly Delegates.grpcsharp_metadata_array_count_delegate grpcsharp_metadata_array_count;
public readonly Delegates.grpcsharp_metadata_array_get_key_delegate grpcsharp_metadata_array_get_key;
public readonly Delegates.grpcsharp_metadata_array_get_value_delegate grpcsharp_metadata_array_get_value;
- public readonly Delegates.grpcsharp_metadata_array_get_value_length_delegate grpcsharp_metadata_array_get_value_length;
public readonly Delegates.grpcsharp_metadata_array_destroy_full_delegate grpcsharp_metadata_array_destroy_full;
public readonly Delegates.grpcsharp_redirect_log_delegate grpcsharp_redirect_log;
@@ -237,7 +236,6 @@ namespace Grpc.Core.Internal
this.grpcsharp_metadata_array_count = GetMethodDelegate<Delegates.grpcsharp_metadata_array_count_delegate>(library);
this.grpcsharp_metadata_array_get_key = GetMethodDelegate<Delegates.grpcsharp_metadata_array_get_key_delegate>(library);
this.grpcsharp_metadata_array_get_value = GetMethodDelegate<Delegates.grpcsharp_metadata_array_get_value_delegate>(library);
- this.grpcsharp_metadata_array_get_value_length = GetMethodDelegate<Delegates.grpcsharp_metadata_array_get_value_length_delegate>(library);
this.grpcsharp_metadata_array_destroy_full = GetMethodDelegate<Delegates.grpcsharp_metadata_array_destroy_full_delegate>(library);
this.grpcsharp_redirect_log = GetMethodDelegate<Delegates.grpcsharp_redirect_log_delegate>(library);
@@ -306,15 +304,15 @@ namespace Grpc.Core.Internal
public delegate IntPtr grpcsharp_batch_context_recv_message_length_delegate(BatchContextSafeHandle ctx);
public delegate void grpcsharp_batch_context_recv_message_to_buffer_delegate(BatchContextSafeHandle ctx, byte[] buffer, UIntPtr bufferLen);
public delegate StatusCode grpcsharp_batch_context_recv_status_on_client_status_delegate(BatchContextSafeHandle ctx);
- public delegate IntPtr grpcsharp_batch_context_recv_status_on_client_details_delegate(BatchContextSafeHandle ctx); // returns const char*
+ public delegate IntPtr grpcsharp_batch_context_recv_status_on_client_details_delegate(BatchContextSafeHandle ctx, out UIntPtr detailsLength);
public delegate IntPtr grpcsharp_batch_context_recv_status_on_client_trailing_metadata_delegate(BatchContextSafeHandle ctx);
public delegate int grpcsharp_batch_context_recv_close_on_server_cancelled_delegate(BatchContextSafeHandle ctx);
public delegate void grpcsharp_batch_context_destroy_delegate(IntPtr ctx);
public delegate RequestCallContextSafeHandle grpcsharp_request_call_context_create_delegate();
public delegate CallSafeHandle grpcsharp_request_call_context_call_delegate(RequestCallContextSafeHandle ctx);
- public delegate IntPtr grpcsharp_request_call_context_method_delegate(RequestCallContextSafeHandle ctx); // returns const char*
- public delegate IntPtr grpcsharp_request_call_context_host_delegate(RequestCallContextSafeHandle ctx); // returns const char*
+ public delegate IntPtr grpcsharp_request_call_context_method_delegate(RequestCallContextSafeHandle ctx, out UIntPtr methodLength);
+ public delegate IntPtr grpcsharp_request_call_context_host_delegate(RequestCallContextSafeHandle ctx, out UIntPtr hostLength);
public delegate Timespec grpcsharp_request_call_context_deadline_delegate(RequestCallContextSafeHandle ctx);
public delegate IntPtr grpcsharp_request_call_context_request_metadata_delegate(RequestCallContextSafeHandle ctx);
public delegate void grpcsharp_request_call_context_destroy_delegate(IntPtr ctx);
@@ -384,9 +382,8 @@ namespace Grpc.Core.Internal
public delegate MetadataArraySafeHandle grpcsharp_metadata_array_create_delegate(UIntPtr capacity);
public delegate void grpcsharp_metadata_array_add_delegate(MetadataArraySafeHandle array, string key, byte[] value, UIntPtr valueLength);
public delegate UIntPtr grpcsharp_metadata_array_count_delegate(IntPtr metadataArray);
- public delegate IntPtr grpcsharp_metadata_array_get_key_delegate(IntPtr metadataArray, UIntPtr index);
- public delegate IntPtr grpcsharp_metadata_array_get_value_delegate(IntPtr metadataArray, UIntPtr index);
- public delegate UIntPtr grpcsharp_metadata_array_get_value_length_delegate(IntPtr metadataArray, UIntPtr index);
+ public delegate IntPtr grpcsharp_metadata_array_get_key_delegate(IntPtr metadataArray, UIntPtr index, out UIntPtr keyLength);
+ public delegate IntPtr grpcsharp_metadata_array_get_value_delegate(IntPtr metadataArray, UIntPtr index, out UIntPtr valueLength);
public delegate void grpcsharp_metadata_array_destroy_full_delegate(IntPtr array);
public delegate void grpcsharp_redirect_log_delegate(GprLogDelegate callback);
diff --git a/src/csharp/Grpc.Core/Internal/RequestCallContextSafeHandle.cs b/src/csharp/Grpc.Core/Internal/RequestCallContextSafeHandle.cs
index ea7819d7b1..c1560bc8bf 100644
--- a/src/csharp/Grpc.Core/Internal/RequestCallContextSafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/RequestCallContextSafeHandle.cs
@@ -66,8 +66,14 @@ namespace Grpc.Core.Internal
{
var call = Native.grpcsharp_request_call_context_call(this);
- var method = Marshal.PtrToStringAnsi(Native.grpcsharp_request_call_context_method(this));
- var host = Marshal.PtrToStringAnsi(Native.grpcsharp_request_call_context_host(this));
+ UIntPtr methodLen;
+ IntPtr methodPtr = Native.grpcsharp_request_call_context_method(this, out methodLen);
+ var method = Marshal.PtrToStringAnsi(methodPtr, (int) methodLen.ToUInt32());
+
+ UIntPtr hostLen;
+ IntPtr hostPtr = Native.grpcsharp_request_call_context_host(this, out hostLen);
+ var host = Marshal.PtrToStringAnsi(hostPtr, (int) hostLen.ToUInt32());
+
var deadline = Native.grpcsharp_request_call_context_deadline(this);
IntPtr metadataArrayPtr = Native.grpcsharp_request_call_context_request_metadata(this);
diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c
index 946f5872c0..12c747b800 100644
--- a/src/csharp/ext/grpc_csharp_ext.c
+++ b/src/csharp/ext/grpc_csharp_ext.c
@@ -73,15 +73,13 @@ typedef struct grpcsharp_batch_context {
grpc_byte_buffer *send_message;
struct {
grpc_metadata_array trailing_metadata;
- char *status_details;
} send_status_from_server;
grpc_metadata_array recv_initial_metadata;
grpc_byte_buffer *recv_message;
struct {
grpc_metadata_array trailing_metadata;
grpc_status_code status;
- char *status_details;
- size_t status_details_capacity;
+ grpc_slice status_details;
} recv_status_on_client;
int recv_close_on_server_cancelled;
} grpcsharp_batch_context;
@@ -122,8 +120,8 @@ void grpcsharp_metadata_array_destroy_metadata_including_entries(
size_t i;
if (array->metadata) {
for (i = 0; i < array->count; i++) {
- gpr_free((void *)array->metadata[i].key);
- gpr_free((void *)array->metadata[i].value);
+ grpc_slice_unref(array->metadata[i].key);
+ grpc_slice_unref(array->metadata[i].value);
}
}
gpr_free(array->metadata);
@@ -167,10 +165,8 @@ grpcsharp_metadata_array_add(grpc_metadata_array *array, const char *key,
const char *value, size_t value_length) {
size_t i = array->count;
GPR_ASSERT(array->count < array->capacity);
- array->metadata[i].key = gpr_strdup(key);
- array->metadata[i].value = (char *)gpr_malloc(value_length);
- memcpy((void *)array->metadata[i].value, value, value_length);
- array->metadata[i].value_length = value_length;
+ array->metadata[i].key = grpc_slice_from_copied_string(key);
+ array->metadata[i].value = grpc_slice_from_copied_buffer(value, value_length);
array->count++;
}
@@ -180,21 +176,17 @@ grpcsharp_metadata_array_count(grpc_metadata_array *array) {
}
GPR_EXPORT const char *GPR_CALLTYPE
-grpcsharp_metadata_array_get_key(grpc_metadata_array *array, size_t index) {
+grpcsharp_metadata_array_get_key(grpc_metadata_array *array, size_t index, size_t *key_length) {
GPR_ASSERT(index < array->count);
- return array->metadata[index].key;
+ *key_length = GRPC_SLICE_LENGTH(array->metadata[index].key);
+ return (char *)GRPC_SLICE_START_PTR(array->metadata[index].key);
}
GPR_EXPORT const char *GPR_CALLTYPE
-grpcsharp_metadata_array_get_value(grpc_metadata_array *array, size_t index) {
+grpcsharp_metadata_array_get_value(grpc_metadata_array *array, size_t index, size_t *value_length) {
GPR_ASSERT(index < array->count);
- return array->metadata[index].value;
-}
-
-GPR_EXPORT intptr_t GPR_CALLTYPE grpcsharp_metadata_array_get_value_length(
- grpc_metadata_array *array, size_t index) {
- GPR_ASSERT(index < array->count);
- return (intptr_t)array->metadata[index].value_length;
+ *value_length = GRPC_SLICE_LENGTH(array->metadata[index].value);
+ return (char *)GRPC_SLICE_START_PTR(array->metadata[index].value);
}
/* Move contents of metadata array */
@@ -227,7 +219,6 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_batch_context_destroy(grpcsharp_batch_con
grpcsharp_metadata_array_destroy_metadata_including_entries(
&(ctx->send_status_from_server.trailing_metadata));
- gpr_free(ctx->send_status_from_server.status_details);
grpcsharp_metadata_array_destroy_metadata_only(&(ctx->recv_initial_metadata));
@@ -235,7 +226,7 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_batch_context_destroy(grpcsharp_batch_con
grpcsharp_metadata_array_destroy_metadata_only(
&(ctx->recv_status_on_client.trailing_metadata));
- gpr_free((void *)ctx->recv_status_on_client.status_details);
+ grpc_slice_unref(ctx->recv_status_on_client.status_details);
gpr_free(ctx);
}
@@ -307,8 +298,9 @@ grpcsharp_batch_context_recv_status_on_client_status(
GPR_EXPORT const char *GPR_CALLTYPE
grpcsharp_batch_context_recv_status_on_client_details(
- const grpcsharp_batch_context *ctx) {
- return ctx->recv_status_on_client.status_details;
+ const grpcsharp_batch_context *ctx, size_t *details_length) {
+ *details_length = GRPC_SLICE_LENGTH(ctx->recv_status_on_client.status_details);
+ return (char *)GRPC_SLICE_START_PTR(ctx->recv_status_on_client.status_details);
}
GPR_EXPORT const grpc_metadata_array *GPR_CALLTYPE
@@ -324,13 +316,15 @@ GPR_EXPORT grpc_call *GPR_CALLTYPE grpcsharp_request_call_context_call(
GPR_EXPORT const char *GPR_CALLTYPE
grpcsharp_request_call_context_method(
- const grpcsharp_request_call_context *ctx) {
- return ctx->call_details.method;
+ const grpcsharp_request_call_context *ctx, size_t *method_length) {
+ *method_length = GRPC_SLICE_LENGTH(ctx->call_details.method);
+ return (char *)GRPC_SLICE_START_PTR(ctx->call_details.method);
}
GPR_EXPORT const char *GPR_CALLTYPE grpcsharp_request_call_context_host(
- const grpcsharp_request_call_context *ctx) {
- return ctx->call_details.host;
+ const grpcsharp_request_call_context *ctx, size_t *host_length) {
+ *host_length = GRPC_SLICE_LENGTH(ctx->call_details.host);
+ return (char *)GRPC_SLICE_START_PTR(ctx->call_details.host);
}
GPR_EXPORT gpr_timespec GPR_CALLTYPE
@@ -404,8 +398,15 @@ grpcsharp_channel_create_call(grpc_channel *channel, grpc_call *parent_call,
grpc_completion_queue *cq,
const char *method, const char *host,
gpr_timespec deadline) {
+ grpc_slice method_slice = grpc_slice_from_copied_string(method);
+ grpc_slice *host_slice_ptr = NULL;
+ grpc_slice host_slice;
+ if (host != NULL) {
+ host_slice = grpc_slice_from_copied_string(host);
+ host_slice_ptr = &host_slice;
+ }
return grpc_channel_create_call(channel, parent_call, propagation_mask, cq,
- method, host, deadline, NULL);
+ method_slice, host_slice_ptr, deadline, NULL);
}
GPR_EXPORT grpc_connectivity_state GPR_CALLTYPE
@@ -560,11 +561,8 @@ grpcsharp_call_start_unary(grpc_call *call, grpcsharp_batch_context *ctx,
&(ctx->recv_status_on_client.trailing_metadata);
ops[5].data.recv_status_on_client.status =
&(ctx->recv_status_on_client.status);
- /* not using preallocation for status_details */
ops[5].data.recv_status_on_client.status_details =
&(ctx->recv_status_on_client.status_details);
- ops[5].data.recv_status_on_client.status_details_capacity =
- &(ctx->recv_status_on_client.status_details_capacity);
ops[5].flags = 0;
ops[5].reserved = NULL;
@@ -604,11 +602,8 @@ grpcsharp_call_start_client_streaming(grpc_call *call,
&(ctx->recv_status_on_client.trailing_metadata);
ops[3].data.recv_status_on_client.status =
&(ctx->recv_status_on_client.status);
- /* not using preallocation for status_details */
ops[3].data.recv_status_on_client.status_details =
&(ctx->recv_status_on_client.status_details);
- ops[3].data.recv_status_on_client.status_details_capacity =
- &(ctx->recv_status_on_client.status_details_capacity);
ops[3].flags = 0;
ops[3].reserved = NULL;
@@ -647,11 +642,8 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming(
&(ctx->recv_status_on_client.trailing_metadata);
ops[3].data.recv_status_on_client.status =
&(ctx->recv_status_on_client.status);
- /* not using preallocation for status_details */
ops[3].data.recv_status_on_client.status_details =
&(ctx->recv_status_on_client.status_details);
- ops[3].data.recv_status_on_client.status_details_capacity =
- &(ctx->recv_status_on_client.status_details_capacity);
ops[3].flags = 0;
ops[3].reserved = NULL;
@@ -681,11 +673,8 @@ grpcsharp_call_start_duplex_streaming(grpc_call *call,
&(ctx->recv_status_on_client.trailing_metadata);
ops[1].data.recv_status_on_client.status =
&(ctx->recv_status_on_client.status);
- /* not using preallocation for status_details */
ops[1].data.recv_status_on_client.status_details =
&(ctx->recv_status_on_client.status_details);
- ops[1].data.recv_status_on_client.status_details_capacity =
- &(ctx->recv_status_on_client.status_details_capacity);
ops[1].flags = 0;
ops[1].reserved = NULL;
@@ -749,10 +738,10 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_status_from_server(
grpc_op ops[3];
memset(ops, 0, sizeof(ops));
size_t nops = 1;
+ grpc_slice status_details_slice = grpc_slice_from_copied_string(status_details);
ops[0].op = GRPC_OP_SEND_STATUS_FROM_SERVER;
ops[0].data.send_status_from_server.status = status_code;
- ops[0].data.send_status_from_server.status_details =
- gpr_strdup(status_details);
+ ops[0].data.send_status_from_server.status_details = &status_details_slice;
grpcsharp_metadata_array_move(
&(ctx->send_status_from_server.trailing_metadata), trailing_metadata);
ops[0].data.send_status_from_server.trailing_metadata_count =
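
Note: because slice payloads are not NUL-terminated, the C# extension above switches every string accessor to a pointer-plus-length pattern and passes status details as a slice. A minimal sketch of that accessor contract (function name illustrative); the returned pointer borrows from the slice, so it is only valid while the owning batch context is still alive:

```c
#include <grpc/slice.h>

/* Expose the raw slice bytes and report the length through an out-parameter;
 * the managed side marshals exactly that many bytes with PtrToStringAnsi. */
const char *borrow_slice_bytes(const grpc_slice *slice, size_t *length) {
  *length = GRPC_SLICE_LENGTH(*slice);
  return (const char *)GRPC_SLICE_START_PTR(*slice);
}
```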
diff --git a/src/node/ext/byte_buffer.cc b/src/node/ext/byte_buffer.cc
index fc339fc462..7d6fb19860 100644
--- a/src/node/ext/byte_buffer.cc
+++ b/src/node/ext/byte_buffer.cc
@@ -40,6 +40,7 @@
#include "grpc/slice.h"
#include "byte_buffer.h"
+#include "slice.h"
namespace grpc {
namespace node {
@@ -54,10 +55,7 @@ using v8::Value;
grpc_byte_buffer *BufferToByteBuffer(Local<Value> buffer) {
Nan::HandleScope scope;
- int length = ::node::Buffer::Length(buffer);
- char *data = ::node::Buffer::Data(buffer);
- grpc_slice slice = grpc_slice_malloc(length);
- memcpy(GRPC_SLICE_START_PTR(slice), data, length);
+ grpc_slice slice = CreateSliceFromBuffer(buffer);
grpc_byte_buffer *byte_buffer(grpc_raw_byte_buffer_create(&slice, 1));
grpc_slice_unref(slice);
return byte_buffer;
diff --git a/src/node/ext/call.cc b/src/node/ext/call.cc
index 191e763e0e..9213d5e87d 100644
--- a/src/node/ext/call.cc
+++ b/src/node/ext/call.cc
@@ -48,6 +48,7 @@
#include "completion_queue.h"
#include "completion_queue_async_worker.h"
#include "call_credentials.h"
+#include "slice.h"
#include "timeval.h"
using std::unique_ptr;
@@ -96,8 +97,7 @@ Local<Value> nanErrorWithCode(const char *msg, grpc_call_error code) {
return scope.Escape(err);
}
-bool CreateMetadataArray(Local<Object> metadata, grpc_metadata_array *array,
- shared_ptr<Resources> resources) {
+bool CreateMetadataArray(Local<Object> metadata, grpc_metadata_array *array) {
HandleScope scope;
grpc_metadata_array_init(array);
Local<Array> keys = Nan::GetOwnPropertyNames(metadata).ToLocalChecked();
@@ -113,32 +113,25 @@ bool CreateMetadataArray(Local<Object> metadata, grpc_metadata_array *array,
array->metadata = reinterpret_cast<grpc_metadata*>(
gpr_malloc(array->capacity * sizeof(grpc_metadata)));
for (unsigned int i = 0; i < keys->Length(); i++) {
- Local<String> current_key(keys->Get(i)->ToString());
- Utf8String *utf8_key = new Utf8String(current_key);
- resources->strings.push_back(unique_ptr<Utf8String>(utf8_key));
+ Local<String> current_key(Nan::To<String>(keys->Get(i)).ToLocalChecked());
Local<Array> values = Local<Array>::Cast(
Nan::Get(metadata, current_key).ToLocalChecked());
+ grpc_slice key_slice = grpc_slice_intern(CreateSliceFromString(current_key));
for (unsigned int j = 0; j < values->Length(); j++) {
Local<Value> value = Nan::Get(values, j).ToLocalChecked();
grpc_metadata *current = &array->metadata[array->count];
- current->key = **utf8_key;
+ current->key = key_slice;
// Only allow binary headers for "-bin" keys
- if (grpc_is_binary_header(current->key, strlen(current->key))) {
+ if (grpc_is_binary_header(key_slice)) {
if (::node::Buffer::HasInstance(value)) {
- current->value = ::node::Buffer::Data(value);
- current->value_length = ::node::Buffer::Length(value);
- PersistentValue *handle = new PersistentValue(value);
- resources->handles.push_back(unique_ptr<PersistentValue>(handle));
+ current->value = CreateSliceFromBuffer(value);
} else {
return false;
}
} else {
if (value->IsString()) {
Local<String> string_value = Nan::To<String>(value).ToLocalChecked();
- Utf8String *utf8_value = new Utf8String(string_value);
- resources->strings.push_back(unique_ptr<Utf8String>(utf8_value));
- current->value = **utf8_value;
- current->value_length = string_value->Length();
+ current->value = CreateSliceFromString(string_value);
} else {
return false;
}
@@ -153,40 +146,25 @@ Local<Value> ParseMetadata(const grpc_metadata_array *metadata_array) {
EscapableHandleScope scope;
grpc_metadata *metadata_elements = metadata_array->metadata;
size_t length = metadata_array->count;
- std::map<const char*, size_t> size_map;
- std::map<const char*, size_t> index_map;
-
- for (unsigned int i = 0; i < length; i++) {
- const char *key = metadata_elements[i].key;
- if (size_map.count(key)) {
- size_map[key] += 1;
- } else {
- size_map[key] = 1;
- }
- index_map[key] = 0;
- }
Local<Object> metadata_object = Nan::New<Object>();
for (unsigned int i = 0; i < length; i++) {
grpc_metadata* elem = &metadata_elements[i];
- Local<String> key_string = Nan::New(elem->key).ToLocalChecked();
+ // TODO(murgatroid99): Use zero-copy string construction instead
+ Local<String> key_string = CopyStringFromSlice(elem->key);
Local<Array> array;
MaybeLocal<Value> maybe_array = Nan::Get(metadata_object, key_string);
if (maybe_array.IsEmpty() || !maybe_array.ToLocalChecked()->IsArray()) {
- array = Nan::New<Array>(size_map[elem->key]);
+ array = Nan::New<Array>(0);
Nan::Set(metadata_object, key_string, array);
} else {
array = Local<Array>::Cast(maybe_array.ToLocalChecked());
}
- if (grpc_is_binary_header(elem->key, strlen(elem->key))) {
- Nan::Set(array, index_map[elem->key],
- MakeFastBuffer(
- Nan::CopyBuffer(elem->value,
- elem->value_length).ToLocalChecked()));
+ if (grpc_is_binary_header(elem->key)) {
+ Nan::Set(array, array->Length(), CreateBufferFromSlice(elem->value));
} else {
- Nan::Set(array, index_map[elem->key],
- Nan::New(elem->value).ToLocalChecked());
+ // TODO(murgatroid99): Use zero-copy string construction instead
+ Nan::Set(array, array->Length(), CopyStringFromSlice(elem->value));
}
- index_map[elem->key] += 1;
}
return scope.Escape(metadata_object);
}
@@ -205,8 +183,7 @@ class SendMetadataOp : public Op {
EscapableHandleScope scope;
return scope.Escape(Nan::True());
}
- bool ParseOp(Local<Value> value, grpc_op *out,
- shared_ptr<Resources> resources) {
+ bool ParseOp(Local<Value> value, grpc_op *out) {
if (!value->IsObject()) {
return false;
}
@@ -216,7 +193,7 @@ class SendMetadataOp : public Op {
return false;
}
if (!CreateMetadataArray(maybe_metadata.ToLocalChecked(),
- &array, resources)) {
+ &array)) {
return false;
}
out->data.send_initial_metadata.count = array.count;
@@ -246,8 +223,7 @@ class SendMessageOp : public Op {
EscapableHandleScope scope;
return scope.Escape(Nan::True());
}
- bool ParseOp(Local<Value> value, grpc_op *out,
- shared_ptr<Resources> resources) {
+ bool ParseOp(Local<Value> value, grpc_op *out) {
if (!::node::Buffer::HasInstance(value)) {
return false;
}
@@ -263,8 +239,6 @@ class SendMessageOp : public Op {
}
send_message = BufferToByteBuffer(value);
out->data.send_message = send_message;
- PersistentValue *handle = new PersistentValue(value);
- resources->handles.push_back(unique_ptr<PersistentValue>(handle));
return true;
}
bool IsFinalOp() {
@@ -284,8 +258,7 @@ class SendClientCloseOp : public Op {
EscapableHandleScope scope;
return scope.Escape(Nan::True());
}
- bool ParseOp(Local<Value> value, grpc_op *out,
- shared_ptr<Resources> resources) {
+ bool ParseOp(Local<Value> value, grpc_op *out) {
return true;
}
bool IsFinalOp() {
@@ -299,12 +272,14 @@ class SendClientCloseOp : public Op {
class SendServerStatusOp : public Op {
public:
+ ~SendServerStatusOp() {
+ grpc_slice_unref(details);
+ }
Local<Value> GetNodeValue() const {
EscapableHandleScope scope;
return scope.Escape(Nan::True());
}
- bool ParseOp(Local<Value> value, grpc_op *out,
- shared_ptr<Resources> resources) {
+ bool ParseOp(Local<Value> value, grpc_op *out) {
if (!value->IsObject()) {
return false;
}
@@ -339,16 +314,15 @@ class SendServerStatusOp : public Op {
Local<String> details = Nan::To<String>(
maybe_details.ToLocalChecked()).ToLocalChecked();
grpc_metadata_array array;
- if (!CreateMetadataArray(metadata, &array, resources)) {
+ if (!CreateMetadataArray(metadata, &array)) {
return false;
}
out->data.send_status_from_server.trailing_metadata_count = array.count;
out->data.send_status_from_server.trailing_metadata = array.metadata;
out->data.send_status_from_server.status =
static_cast<grpc_status_code>(code);
- Utf8String *str = new Utf8String(details);
- resources->strings.push_back(unique_ptr<Utf8String>(str));
- out->data.send_status_from_server.status_details = **str;
+ this->details = CreateSliceFromString(details);
+ out->data.send_status_from_server.status_details = &this->details;
return true;
}
bool IsFinalOp() {
@@ -358,6 +332,9 @@ class SendServerStatusOp : public Op {
std::string GetTypeString() const {
return "send_status";
}
+
+ private:
+ grpc_slice details;
};
class GetMetadataOp : public Op {
@@ -375,8 +352,7 @@ class GetMetadataOp : public Op {
return scope.Escape(ParseMetadata(&recv_metadata));
}
- bool ParseOp(Local<Value> value, grpc_op *out,
- shared_ptr<Resources> resources) {
+ bool ParseOp(Local<Value> value, grpc_op *out) {
out->data.recv_initial_metadata = &recv_metadata;
return true;
}
@@ -408,8 +384,7 @@ class ReadMessageOp : public Op {
return scope.Escape(ByteBufferToBuffer(recv_message));
}
- bool ParseOp(Local<Value> value, grpc_op *out,
- shared_ptr<Resources> resources) {
+ bool ParseOp(Local<Value> value, grpc_op *out) {
out->data.recv_message = &recv_message;
return true;
}
@@ -430,21 +405,16 @@ class ClientStatusOp : public Op {
public:
ClientStatusOp() {
grpc_metadata_array_init(&metadata_array);
- status_details = NULL;
- details_capacity = 0;
}
~ClientStatusOp() {
grpc_metadata_array_destroy(&metadata_array);
- gpr_free(status_details);
}
- bool ParseOp(Local<Value> value, grpc_op *out,
- shared_ptr<Resources> resources) {
+ bool ParseOp(Local<Value> value, grpc_op *out) {
out->data.recv_status_on_client.trailing_metadata = &metadata_array;
out->data.recv_status_on_client.status = &status;
out->data.recv_status_on_client.status_details = &status_details;
- out->data.recv_status_on_client.status_details_capacity = &details_capacity;
return true;
}
@@ -453,10 +423,8 @@ class ClientStatusOp : public Op {
Local<Object> status_obj = Nan::New<Object>();
Nan::Set(status_obj, Nan::New("code").ToLocalChecked(),
Nan::New<Number>(status));
- if (status_details != NULL) {
- Nan::Set(status_obj, Nan::New("details").ToLocalChecked(),
- Nan::New(status_details).ToLocalChecked());
- }
+ Nan::Set(status_obj, Nan::New("details").ToLocalChecked(),
+ CopyStringFromSlice(status_details));
Nan::Set(status_obj, Nan::New("metadata").ToLocalChecked(),
ParseMetadata(&metadata_array));
return scope.Escape(status_obj);
@@ -471,8 +439,7 @@ class ClientStatusOp : public Op {
private:
grpc_metadata_array metadata_array;
grpc_status_code status;
- char *status_details;
- size_t details_capacity;
+ grpc_slice status_details;
};
class ServerCloseResponseOp : public Op {
@@ -482,8 +449,7 @@ class ServerCloseResponseOp : public Op {
return scope.Escape(Nan::New<Boolean>(cancelled));
}
- bool ParseOp(Local<Value> value, grpc_op *out,
- shared_ptr<Resources> resources) {
+ bool ParseOp(Local<Value> value, grpc_op *out) {
out->data.recv_close_on_server.cancelled = &cancelled;
return true;
}
@@ -500,9 +466,8 @@ class ServerCloseResponseOp : public Op {
int cancelled;
};
-tag::tag(Callback *callback, OpVec *ops,
- shared_ptr<Resources> resources, Call *call) :
- callback(callback), ops(ops), resources(resources), call(call){
+tag::tag(Callback *callback, OpVec *ops, Call *call) :
+ callback(callback), ops(ops), call(call){
}
tag::~tag() {
@@ -650,20 +615,24 @@ NAN_METHOD(Call::New) {
if (channel->GetWrappedChannel() == NULL) {
return Nan::ThrowError("Call cannot be created from a closed channel");
}
- Utf8String method(info[1]);
double deadline = Nan::To<double>(info[2]).FromJust();
grpc_channel *wrapped_channel = channel->GetWrappedChannel();
grpc_call *wrapped_call;
if (info[3]->IsString()) {
- Utf8String host_override(info[3]);
+ grpc_slice *host = new grpc_slice;
+ *host = CreateSliceFromString(
+ Nan::To<String>(info[3]).ToLocalChecked());
wrapped_call = grpc_channel_create_call(
wrapped_channel, parent_call, propagate_flags,
- GetCompletionQueue(), *method,
- *host_override, MillisecondsToTimespec(deadline), NULL);
+ GetCompletionQueue(), CreateSliceFromString(
+ Nan::To<String>(info[1]).ToLocalChecked()),
+ host, MillisecondsToTimespec(deadline), NULL);
+ delete host;
} else if (info[3]->IsUndefined() || info[3]->IsNull()) {
wrapped_call = grpc_channel_create_call(
wrapped_channel, parent_call, propagate_flags,
- GetCompletionQueue(), *method,
+ GetCompletionQueue(), CreateSliceFromString(
+ Nan::To<String>(info[1]).ToLocalChecked()),
NULL, MillisecondsToTimespec(deadline), NULL);
} else {
return Nan::ThrowTypeError("Call's fourth argument must be a string");
@@ -700,7 +669,6 @@ NAN_METHOD(Call::StartBatch) {
}
Local<Function> callback_func = info[1].As<Function>();
Call *call = ObjectWrap::Unwrap<Call>(info.This());
- shared_ptr<Resources> resources(new Resources);
Local<Object> obj = Nan::To<Object>(info[0]).ToLocalChecked();
Local<Array> keys = Nan::GetOwnPropertyNames(obj).ToLocalChecked();
size_t nops = keys->Length();
@@ -745,7 +713,7 @@ NAN_METHOD(Call::StartBatch) {
default:
return Nan::ThrowError("Argument object had an unrecognized key");
}
- if (!op->ParseOp(obj->Get(type), &ops[i], resources)) {
+ if (!op->ParseOp(obj->Get(type), &ops[i])) {
return Nan::ThrowTypeError("Incorrectly typed arguments to startBatch");
}
op_vector->push_back(std::move(op));
@@ -753,7 +721,7 @@ NAN_METHOD(Call::StartBatch) {
Callback *callback = new Callback(callback_func);
grpc_call_error error = grpc_call_start_batch(
call->wrapped_call, &ops[0], nops, new struct tag(
- callback, op_vector.release(), resources, call), NULL);
+ callback, op_vector.release(), call), NULL);
if (error != GRPC_CALL_OK) {
return Nan::ThrowError(nanErrorWithCode("startBatch failed", error));
}
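
Note: with slices owning or referencing their backing memory, the Node extension no longer needs the Resources side-table to keep Utf8Strings and Buffers alive for the duration of a batch. A minimal C sketch of the metadata layout the rewritten CreateMetadataArray targets (helper name and the omitted capacity check are assumptions; grpc_slice_intern is available to extension code, as the hunk above shows):

```c
#include <grpc/grpc.h>
#include <grpc/slice.h>

/* Append one key/value pair using the slice-based grpc_metadata fields.
 * Keys are interned so repeated headers share storage; values copy their
 * bytes, so nothing else has to outlive the batch. */
void append_metadata(grpc_metadata_array *array, const char *key,
                     const char *value, size_t value_len) {
  grpc_metadata *md = &array->metadata[array->count++];
  md->key = grpc_slice_intern(grpc_slice_from_static_string(key));
  md->value = grpc_slice_from_copied_buffer(value, value_len);
}
```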
diff --git a/src/node/ext/call.h b/src/node/ext/call.h
index 31c6566d14..cffff00fce 100644
--- a/src/node/ext/call.h
+++ b/src/node/ext/call.h
@@ -51,20 +51,12 @@ namespace node {
using std::unique_ptr;
using std::shared_ptr;
-typedef Nan::Persistent<v8::Value, Nan::CopyablePersistentTraits<v8::Value>> PersistentValue;
-
v8::Local<v8::Value> nanErrorWithCode(const char *msg, grpc_call_error code);
v8::Local<v8::Value> ParseMetadata(const grpc_metadata_array *metadata_array);
-struct Resources {
- std::vector<unique_ptr<Nan::Utf8String> > strings;
- std::vector<unique_ptr<PersistentValue> > handles;
-};
-
bool CreateMetadataArray(v8::Local<v8::Object> metadata,
- grpc_metadata_array *array,
- shared_ptr<Resources> resources);
+ grpc_metadata_array *array);
/* Wrapper class for grpc_call structs. */
class Call : public Nan::ObjectWrap {
@@ -106,8 +98,7 @@ class Call : public Nan::ObjectWrap {
class Op {
public:
virtual v8::Local<v8::Value> GetNodeValue() const = 0;
- virtual bool ParseOp(v8::Local<v8::Value> value, grpc_op *out,
- shared_ptr<Resources> resources) = 0;
+ virtual bool ParseOp(v8::Local<v8::Value> value, grpc_op *out) = 0;
virtual ~Op();
v8::Local<v8::Value> GetOpType() const;
virtual bool IsFinalOp() = 0;
@@ -118,12 +109,10 @@ class Op {
typedef std::vector<unique_ptr<Op>> OpVec;
struct tag {
- tag(Nan::Callback *callback, OpVec *ops,
- shared_ptr<Resources> resources, Call *call);
+ tag(Nan::Callback *callback, OpVec *ops, Call *call);
~tag();
Nan::Callback *callback;
OpVec *ops;
- shared_ptr<Resources> resources;
Call *call;
};
diff --git a/src/node/ext/call_credentials.cc b/src/node/ext/call_credentials.cc
index 81fc552fd1..4d172d4ddf 100644
--- a/src/node/ext/call_credentials.cc
+++ b/src/node/ext/call_credentials.cc
@@ -206,7 +206,6 @@ NAN_METHOD(PluginCallback) {
return Nan::ThrowTypeError(
"The callback's fourth argument must be an object");
}
- shared_ptr<Resources> resources(new Resources);
grpc_status_code code = static_cast<grpc_status_code>(
Nan::To<uint32_t>(info[0]).FromJust());
Utf8String details_utf8_str(info[1]);
@@ -214,7 +213,7 @@ NAN_METHOD(PluginCallback) {
grpc_metadata_array array;
Local<Object> callback_data = Nan::To<Object>(info[3]).ToLocalChecked();
if (!CreateMetadataArray(Nan::To<Object>(info[2]).ToLocalChecked(),
- &array, resources)){
+ &array)){
return Nan::ThrowError("Failed to parse metadata");
}
grpc_credentials_plugin_metadata_cb cb =
diff --git a/src/node/ext/channel.cc b/src/node/ext/channel.cc
index 5bc58b9b32..c795ff7f42 100644
--- a/src/node/ext/channel.cc
+++ b/src/node/ext/channel.cc
@@ -280,8 +280,7 @@ NAN_METHOD(Channel::WatchConnectivityState) {
channel->wrapped_channel, last_state, MillisecondsToTimespec(deadline),
GetCompletionQueue(),
new struct tag(callback,
- ops.release(),
- shared_ptr<Resources>(nullptr), NULL));
+ ops.release(), NULL));
CompletionQueueNext();
}
diff --git a/src/node/ext/node_grpc.cc b/src/node/ext/node_grpc.cc
index 9b9eee85b7..682af0e5ad 100644
--- a/src/node/ext/node_grpc.cc
+++ b/src/node/ext/node_grpc.cc
@@ -56,9 +56,12 @@ extern "C" {
#include "server.h"
#include "completion_queue_async_worker.h"
#include "server_credentials.h"
+#include "slice.h"
#include "timeval.h"
#include "completion_queue.h"
+using grpc::node::CreateSliceFromString;
+
using v8::FunctionTemplate;
using v8::Local;
using v8::Value;
@@ -283,10 +286,8 @@ NAN_METHOD(MetadataKeyIsLegal) {
"headerKeyIsLegal's argument must be a string");
}
Local<String> key = Nan::To<String>(info[0]).ToLocalChecked();
- Nan::Utf8String key_utf8_str(key);
- char *key_str = *key_utf8_str;
info.GetReturnValue().Set(static_cast<bool>(
- grpc_header_key_is_legal(key_str, static_cast<size_t>(key->Length()))));
+ grpc_header_key_is_legal(CreateSliceFromString(key))));
}
NAN_METHOD(MetadataNonbinValueIsLegal) {
@@ -295,11 +296,8 @@ NAN_METHOD(MetadataNonbinValueIsLegal) {
"metadataNonbinValueIsLegal's argument must be a string");
}
Local<String> value = Nan::To<String>(info[0]).ToLocalChecked();
- Nan::Utf8String value_utf8_str(value);
- char *value_str = *value_utf8_str;
info.GetReturnValue().Set(static_cast<bool>(
- grpc_header_nonbin_value_is_legal(
- value_str, static_cast<size_t>(value->Length()))));
+ grpc_header_nonbin_value_is_legal(CreateSliceFromString(value))));
}
NAN_METHOD(MetadataKeyIsBinary) {
@@ -308,10 +306,8 @@ NAN_METHOD(MetadataKeyIsBinary) {
"metadataKeyIsLegal's argument must be a string");
}
Local<String> key = Nan::To<String>(info[0]).ToLocalChecked();
- Nan::Utf8String key_utf8_str(key);
- char *key_str = *key_utf8_str;
info.GetReturnValue().Set(static_cast<bool>(
- grpc_is_binary_header(key_str, static_cast<size_t>(key->Length()))));
+ grpc_is_binary_header(CreateSliceFromString(key))));
}
static grpc_ssl_roots_override_result get_ssl_roots_override(
diff --git a/src/node/ext/server.cc b/src/node/ext/server.cc
index 70d5b96f39..4761b2867d 100644
--- a/src/node/ext/server.cc
+++ b/src/node/ext/server.cc
@@ -46,6 +46,7 @@
#include "grpc/grpc_security.h"
#include "grpc/support/log.h"
#include "server_credentials.h"
+#include "slice.h"
#include "timeval.h"
namespace grpc {
@@ -99,10 +100,11 @@ class NewCallOp : public Op {
}
Local<Object> obj = Nan::New<Object>();
Nan::Set(obj, Nan::New("call").ToLocalChecked(), Call::WrapStruct(call));
+ // TODO(murgatroid99): Use zero-copy string construction instead
Nan::Set(obj, Nan::New("method").ToLocalChecked(),
- Nan::New(details.method).ToLocalChecked());
+ CopyStringFromSlice(details.method));
Nan::Set(obj, Nan::New("host").ToLocalChecked(),
- Nan::New(details.host).ToLocalChecked());
+ CopyStringFromSlice(details.host));
Nan::Set(obj, Nan::New("deadline").ToLocalChecked(),
Nan::New<Date>(TimespecToMilliseconds(details.deadline))
.ToLocalChecked());
@@ -111,8 +113,7 @@ class NewCallOp : public Op {
return scope.Escape(obj);
}
- bool ParseOp(Local<Value> value, grpc_op *out,
- shared_ptr<Resources> resources) {
+ bool ParseOp(Local<Value> value, grpc_op *out) {
return true;
}
bool IsFinalOp() {
@@ -139,8 +140,7 @@ class ServerShutdownOp : public Op {
return Nan::New<External>(reinterpret_cast<void *>(server));
}
- bool ParseOp(Local<Value> value, grpc_op *out,
- shared_ptr<Resources> resources) {
+ bool ParseOp(Local<Value> value, grpc_op *out) {
return true;
}
bool IsFinalOp() {
@@ -207,8 +207,7 @@ void Server::ShutdownServer() {
grpc_server_shutdown_and_notify(
this->wrapped_server, GetCompletionQueue(),
- new struct tag(new Callback(**shutdown_callback), ops.release(),
- shared_ptr<Resources>(nullptr), NULL));
+ new struct tag(new Callback(**shutdown_callback), ops.release(), NULL));
grpc_server_cancel_all_calls(this->wrapped_server);
CompletionQueueNext();
this->wrapped_server = NULL;
@@ -261,7 +260,7 @@ NAN_METHOD(Server::RequestCall) {
GetCompletionQueue(),
GetCompletionQueue(),
new struct tag(new Callback(info[0].As<Function>()), ops.release(),
- shared_ptr<Resources>(nullptr), NULL));
+ NULL));
if (error != GRPC_CALL_OK) {
return Nan::ThrowError(nanErrorWithCode("requestCall failed", error));
}
@@ -314,7 +313,7 @@ NAN_METHOD(Server::TryShutdown) {
grpc_server_shutdown_and_notify(
server->wrapped_server, GetCompletionQueue(),
new struct tag(new Nan::Callback(info[0].As<Function>()), ops.release(),
- shared_ptr<Resources>(nullptr), NULL));
+ NULL));
CompletionQueueNext();
}
diff --git a/src/node/ext/slice.cc b/src/node/ext/slice.cc
new file mode 100644
index 0000000000..98a80b3d2f
--- /dev/null
+++ b/src/node/ext/slice.cc
@@ -0,0 +1,102 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <node.h>
+#include <nan.h>
+#include <grpc/slice.h>
+#include <grpc/support/alloc.h>
+
+#include "slice.h"
+#include "byte_buffer.h"
+
+namespace grpc {
+namespace node {
+
+using Nan::Persistent;
+
+using v8::Local;
+using v8::String;
+using v8::Value;
+
+namespace {
+void SliceFreeCallback(char *data, void *hint) {
+ grpc_slice *slice = reinterpret_cast<grpc_slice*>(hint);
+ grpc_slice_unref(*slice);
+ delete slice;
+}
+
+void string_destroy_func(void *user_data) {
+ delete reinterpret_cast<Nan::Utf8String*>(user_data);
+}
+
+void buffer_destroy_func(void *user_data) {
+ delete reinterpret_cast<PersistentValue*>(user_data);
+}
+} // namespace
+
+grpc_slice CreateSliceFromString(const Local<String> source) {
+ Nan::HandleScope scope;
+ Nan::Utf8String *utf8_value = new Nan::Utf8String(source);
+ return grpc_slice_new_with_user_data(**utf8_value, source->Length(),
+ string_destroy_func, utf8_value);
+}
+
+grpc_slice CreateSliceFromBuffer(const Local<Value> source) {
+ // Prerequisite: ::node::Buffer::HasInstance(source)
+ Nan::HandleScope scope;
+ return grpc_slice_new_with_user_data(::node::Buffer::Data(source),
+ ::node::Buffer::Length(source),
+ buffer_destroy_func,
+ new PersistentValue(source));
+}
+Local<String> CopyStringFromSlice(const grpc_slice slice) {
+ Nan::EscapableHandleScope scope;
+ if (GRPC_SLICE_LENGTH(slice) == 0) {
+ return scope.Escape(Nan::EmptyString());
+ }
+ return scope.Escape(Nan::New<String>(
+ const_cast<char *>(reinterpret_cast<const char *>(GRPC_SLICE_START_PTR(slice))),
+ GRPC_SLICE_LENGTH(slice)).ToLocalChecked());
+}
+
+Local<Value> CreateBufferFromSlice(const grpc_slice slice) {
+ Nan::EscapableHandleScope scope;
+ grpc_slice *slice_ptr = new grpc_slice;
+ *slice_ptr = grpc_slice_ref(slice);
+ return scope.Escape(MakeFastBuffer(Nan::NewBuffer(
+ const_cast<char *>(reinterpret_cast<const char *>(GRPC_SLICE_START_PTR(*slice_ptr))),
+ GRPC_SLICE_LENGTH(*slice_ptr), SliceFreeCallback, slice_ptr).ToLocalChecked()));
+}
+
+} // namespace node
+} // namespace grpc
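
Note: the new slice.cc avoids copying V8 strings and Buffers by wrapping their storage with grpc_slice_new_with_user_data and releasing the backing object from the slice's destroy callback once the last reference is dropped. A standalone C sketch of the same pattern, assuming a plain malloc'd buffer instead of a V8 handle:

```c
#include <stdlib.h>
#include <string.h>
#include <grpc/slice.h>

/* Destroy callback: runs when the slice's refcount reaches zero. */
static void free_backing_buffer(void *user_data) { free(user_data); }

/* Wrap externally owned bytes in a slice without an extra copy inside core. */
grpc_slice slice_adopting_buffer(const char *data, size_t len) {
  char *buf = malloc(len);
  memcpy(buf, data, len);
  return grpc_slice_new_with_user_data(buf, len, free_backing_buffer, buf);
}
```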
diff --git a/src/node/ext/slice.h b/src/node/ext/slice.h
new file mode 100644
index 0000000000..7dcb1bd45a
--- /dev/null
+++ b/src/node/ext/slice.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <node.h>
+#include <nan.h>
+#include <grpc/slice.h>
+
+namespace grpc {
+namespace node {
+
+typedef Nan::Persistent<v8::Value, Nan::CopyablePersistentTraits<v8::Value>> PersistentValue;
+
+grpc_slice CreateSliceFromString(const v8::Local<v8::String> source);
+
+grpc_slice CreateSliceFromBuffer(const v8::Local<v8::Value> source);
+
+v8::Local<v8::String> CopyStringFromSlice(const grpc_slice slice);
+
+v8::Local<v8::Value> CreateBufferFromSlice(const grpc_slice slice);
+
+} // namespace node
+} // namespace grpc
diff --git a/src/objective-c/CronetFramework.podspec b/src/objective-c/CronetFramework.podspec
index 2f47b02c0c..509358dada 100644
--- a/src/objective-c/CronetFramework.podspec
+++ b/src/objective-c/CronetFramework.podspec
@@ -30,7 +30,8 @@
Pod::Spec.new do |s|
s.name = "CronetFramework"
- s.version = "0.0.3"
+ v = '0.0.4'
+ s.version = v
s.summary = "Cronet, precompiled and used as a framework."
s.homepage = "http://chromium.org"
s.license = {
@@ -69,7 +70,7 @@ Pod::Spec.new do |s|
s.vendored_framework = "Cronet.framework"
s.author = "The Chromium Authors"
s.ios.deployment_target = "8.0"
- s.source = { :http => 'https://storage.googleapis.com/grpc-precompiled-binaries/cronet/Cronet.framework.zip' }
+ s.source = { :http => "https://storage.googleapis.com/grpc-precompiled-binaries/cronet/Cronet.framework-v#{v}.zip"}
s.preserve_paths = "Cronet.framework"
s.public_header_files = "Cronet.framework/Headers/**/*{.h}"
s.source_files = "Cronet.framework/Headers/**/*{.h}"
diff --git a/src/objective-c/GRPCClient/GRPCCall+Cronet.h b/src/objective-c/GRPCClient/GRPCCall+Cronet.h
index 2d8f7ac8fb..b9d286c929 100644
--- a/src/objective-c/GRPCClient/GRPCCall+Cronet.h
+++ b/src/objective-c/GRPCClient/GRPCCall+Cronet.h
@@ -43,13 +43,13 @@
/**
* This method should be called before issuing the first RPC. It should be
* called only once. Create an instance of Cronet engine in your app elsewhere
- * and pass the instance pointer in the cronet_engine parameter. Once set,
+ * and pass the instance pointer in the stream_engine parameter. Once set,
* all subsequent RPCs will use Cronet transport. The method is not thread
* safe.
*/
-+(void)useCronetWithEngine:(cronet_engine *)engine;
++(void)useCronetWithEngine:(stream_engine *)engine;
-+(cronet_engine *)cronetEngine;
++(stream_engine *)cronetEngine;
+(BOOL)isUsingCronet;
diff --git a/src/objective-c/GRPCClient/GRPCCall+Cronet.m b/src/objective-c/GRPCClient/GRPCCall+Cronet.m
index 76ca1a2537..0e3598fb87 100644
--- a/src/objective-c/GRPCClient/GRPCCall+Cronet.m
+++ b/src/objective-c/GRPCClient/GRPCCall+Cronet.m
@@ -35,16 +35,16 @@
#ifdef GRPC_COMPILE_WITH_CRONET
static BOOL useCronet = NO;
-static cronet_engine *globalCronetEngine;
+static stream_engine *globalCronetEngine;
@implementation GRPCCall (Cronet)
-+ (void)useCronetWithEngine:(cronet_engine *)engine {
++ (void)useCronetWithEngine:(stream_engine *)engine {
useCronet = YES;
globalCronetEngine = engine;
}
-+ (cronet_engine *)cronetEngine {
++ (stream_engine *)cronetEngine {
return globalCronetEngine;
}
diff --git a/src/objective-c/GRPCClient/private/GRPCChannel.m b/src/objective-c/GRPCClient/private/GRPCChannel.m
index e49aceefe1..c533c5ae71 100644
--- a/src/objective-c/GRPCClient/private/GRPCChannel.m
+++ b/src/objective-c/GRPCClient/private/GRPCChannel.m
@@ -108,7 +108,7 @@ static grpc_channel_args *BuildChannelArgs(NSDictionary *dictionary) {
#ifdef GRPC_COMPILE_WITH_CRONET
- (instancetype)initWithHost:(NSString *)host
- cronetEngine:(cronet_engine *)cronetEngine
+ cronetEngine:(stream_engine *)cronetEngine
channelArgs:(NSDictionary *)channelArgs {
if (!host) {
[NSException raise:NSInvalidArgumentException format:@"host argument missing"];
@@ -163,7 +163,7 @@ static grpc_channel_args *BuildChannelArgs(NSDictionary *dictionary) {
#ifdef GRPC_COMPILE_WITH_CRONET
+ (GRPCChannel *)secureCronetChannelWithHost:(NSString *)host
channelArgs:(NSDictionary *)channelArgs {
- cronet_engine *engine = [GRPCCall cronetEngine];
+ stream_engine *engine = [GRPCCall cronetEngine];
if (!engine) {
[NSException raise:NSInvalidArgumentException
format:@"cronet_engine is NULL. Set it first."];
@@ -200,7 +200,7 @@ static grpc_channel_args *BuildChannelArgs(NSDictionary *dictionary) {
return grpc_channel_create_call(_unmanagedChannel,
NULL, GRPC_PROPAGATE_DEFAULTS,
queue.unmanagedQueue,
- path.UTF8String,
+ grpc_slice_from_copied_string(path.UTF8String),
NULL, // Passing NULL for host
gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
}
diff --git a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
index 38fcae0299..43c564552b 100644
--- a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
+++ b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
@@ -194,7 +194,7 @@
@implementation GRPCOpRecvStatus{
grpc_status_code _statusCode;
- char *_details;
+ grpc_slice _details;
size_t _detailsCapacity;
grpc_metadata_array _trailers;
}
@@ -208,7 +208,6 @@
_op.op = GRPC_OP_RECV_STATUS_ON_CLIENT;
_op.data.recv_status_on_client.status = &_statusCode;
_op.data.recv_status_on_client.status_details = &_details;
- _op.data.recv_status_on_client.status_details_capacity = &_detailsCapacity;
grpc_metadata_array_init(&_trailers);
_op.data.recv_status_on_client.trailing_metadata = &_trailers;
if (handler) {
@@ -216,11 +215,15 @@
__weak typeof(self) weakSelf = self;
_handler = ^{
__strong typeof(self) strongSelf = weakSelf;
- NSError *error = [NSError grpc_errorFromStatusCode:strongSelf->_statusCode
- details:strongSelf->_details];
- NSDictionary *trailers = [NSDictionary
- grpc_dictionaryFromMetadataArray:strongSelf->_trailers];
- handler(error, trailers);
+ if (strongSelf) {
+ char *details = grpc_slice_to_c_string(strongSelf->_details);
+ NSError *error = [NSError grpc_errorFromStatusCode:strongSelf->_statusCode
+ details:details];
+ NSDictionary *trailers = [NSDictionary
+ grpc_dictionaryFromMetadataArray:strongSelf->_trailers];
+ handler(error, trailers);
+ gpr_free(details);
+ }
};
}
}
@@ -229,7 +232,7 @@
- (void)dealloc {
grpc_metadata_array_destroy(&_trailers);
- gpr_free(_details);
+ grpc_slice_unref(_details);
}
@end
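
Note: the Objective-C wrapper above reflects the new recv_status_on_client contract: status details arrive as a grpc_slice and the separate capacity pointer is gone. A minimal C sketch of wiring the op and consuming the result (variable names are illustrative):

```c
#include <grpc/grpc.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>

static grpc_status_code status;
static grpc_slice status_details; /* filled in by core when the batch completes */
static grpc_metadata_array trailers;

void fill_recv_status_op(grpc_op *op) {
  grpc_metadata_array_init(&trailers);
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.status = &status;
  op->data.recv_status_on_client.status_details = &status_details;
  op->data.recv_status_on_client.trailing_metadata = &trailers;
}

void on_batch_complete(void) {
  char *details = grpc_slice_to_c_string(status_details); /* NUL-terminated copy */
  /* ... report (status, details) to the application ... */
  gpr_free(details);
  grpc_slice_unref(status_details);
  grpc_metadata_array_destroy(&trailers);
}
```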
diff --git a/src/objective-c/GRPCClient/private/NSDictionary+GRPC.m b/src/objective-c/GRPCClient/private/NSDictionary+GRPC.m
index 7477da7619..feb2bb5ed8 100644
--- a/src/objective-c/GRPCClient/private/NSDictionary+GRPC.m
+++ b/src/objective-c/GRPCClient/private/NSDictionary+GRPC.m
@@ -47,12 +47,12 @@
@implementation NSData (GRPCMetadata)
+ (instancetype)grpc_dataFromMetadataValue:(grpc_metadata *)metadata {
// TODO(jcanizales): Should we use a non-copy constructor?
- return [self dataWithBytes:metadata->value length:metadata->value_length];
+ return [self dataWithBytes:GRPC_SLICE_START_PTR(metadata->value)
+ length:GRPC_SLICE_LENGTH(metadata->value)];
}
- (void)grpc_initMetadata:(grpc_metadata *)metadata {
- metadata->value = self.bytes;
- metadata->value_length = self.length;
+ metadata->value = grpc_slice_from_copied_buffer(self.bytes, self.length);
}
@end
@@ -67,15 +67,14 @@
@implementation NSString (GRPCMetadata)
+ (instancetype)grpc_stringFromMetadataValue:(grpc_metadata *)metadata {
- return [[self alloc] initWithBytes:metadata->value
- length:metadata->value_length
+ return [[self alloc] initWithBytes:GRPC_SLICE_START_PTR(metadata->value)
+ length:GRPC_SLICE_LENGTH(metadata->value)
encoding:NSASCIIStringEncoding];
}
// Precondition: This object contains only ASCII characters.
- (void)grpc_initMetadata:(grpc_metadata *)metadata {
- metadata->value = self.UTF8String;
- metadata->value_length = self.length;
+ metadata->value = grpc_slice_from_copied_string(self.UTF8String);
}
@end
@@ -89,7 +88,10 @@
+ (instancetype)grpc_dictionaryFromMetadata:(grpc_metadata *)entries count:(size_t)count {
NSMutableDictionary *metadata = [NSMutableDictionary dictionaryWithCapacity:count];
for (grpc_metadata *entry = entries; entry < entries + count; entry++) {
- NSString *name = [NSString stringWithCString:entry->key encoding:NSASCIIStringEncoding];
+ char *key = grpc_slice_to_c_string(entry->key);
+ NSString *name = [NSString stringWithCString:key
+ encoding:NSASCIIStringEncoding];
+ gpr_free(key);
if (!name || metadata[name]) {
// Log if name is nil?
continue;
@@ -112,7 +114,7 @@
grpc_metadata *current = metadata;
for (NSString* key in self) {
id value = self[key];
- current->key = key.UTF8String;
+ current->key = grpc_slice_from_copied_string(key.UTF8String);
if ([value respondsToSelector:@selector(grpc_initMetadata:)]) {
[value grpc_initMetadata:current];
} else {
diff --git a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m
index 4ba7badd86..01eea52ef3 100644
--- a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m
+++ b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m
@@ -94,7 +94,7 @@ static void process_auth_failure(void *state, grpc_auth_context *ctx,
static void cronet_init_client_secure_fullstack(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args,
- cronet_engine *cronetEngine) {
+ stream_engine *cronetEngine) {
fullstack_secure_fixture_data *ffd = f->fixture_data;
f->client = grpc_cronet_secure_channel_create(cronetEngine, ffd->localaddr,
client_args, NULL);
@@ -124,7 +124,7 @@ static void chttp2_tear_down_secure_fullstack(grpc_end2end_test_fixture *f) {
static void cronet_init_client_simple_ssl_secure_fullstack(
grpc_end2end_test_fixture *f, grpc_channel_args *client_args) {
- cronet_engine *cronetEngine = [Cronet getGlobalEngine];
+ stream_engine *cronetEngine = [Cronet getGlobalEngine];
grpc_channel_args *new_client_args = grpc_channel_args_copy(client_args);
cronet_init_client_secure_fullstack(f, new_client_args, cronetEngine);
diff --git a/src/php/README.md b/src/php/README.md
index 1b15768d44..320220d3e4 100644
--- a/src/php/README.md
+++ b/src/php/README.md
@@ -163,6 +163,13 @@ of this repo. The plugin can be found in the `bins/opt` directory. We are
planning to provide a better way to download and install the plugin
in the future.
+You can also just build the gRPC PHP protoc plugin by running:
+
+```sh
+$ cd grpc
+$ make grpc_php_plugin
+```
+
### Client Stub
diff --git a/src/php/ext/grpc/call.c b/src/php/ext/grpc/call.c
index 64b1137c2a..8a2070481e 100644
--- a/src/php/ext/grpc/call.c
+++ b/src/php/ext/grpc/call.c
@@ -100,11 +100,11 @@ zval *grpc_parse_metadata_array(grpc_metadata_array
grpc_metadata *elem;
for (i = 0; i < count; i++) {
elem = &elements[i];
- key_len = strlen(elem->key);
+ key_len = GRPC_SLICE_LENGTH(elem->key);
str_key = ecalloc(key_len + 1, sizeof(char));
- memcpy(str_key, elem->key, key_len);
- str_val = ecalloc(elem->value_length + 1, sizeof(char));
- memcpy(str_val, elem->value, elem->value_length);
+ memcpy(str_key, GRPC_SLICE_START_PTR(elem->key), key_len);
+ str_val = ecalloc(GRPC_SLICE_LENGTH(elem->value) + 1, sizeof(char));
+ memcpy(str_val, GRPC_SLICE_START_PTR(elem->value), GRPC_SLICE_LENGTH(elem->value));
if (php_grpc_zend_hash_find(array_hash, str_key, key_len, (void **)&data)
== SUCCESS) {
if (Z_TYPE_P(data) != IS_ARRAY) {
@@ -115,13 +115,13 @@ zval *grpc_parse_metadata_array(grpc_metadata_array
efree(str_val);
return NULL;
}
- php_grpc_add_next_index_stringl(data, str_val, elem->value_length,
+ php_grpc_add_next_index_stringl(data, str_val, GRPC_SLICE_LENGTH(elem->value),
false);
} else {
PHP_GRPC_MAKE_STD_ZVAL(inner_array);
array_init(inner_array);
php_grpc_add_next_index_stringl(inner_array, str_val,
- elem->value_length, false);
+ GRPC_SLICE_LENGTH(elem->value), false);
add_assoc_zval(array, str_key, inner_array);
}
}
@@ -164,7 +164,7 @@ bool create_metadata_array(zval *array, grpc_metadata_array *metadata) {
if (key_type1 != HASH_KEY_IS_STRING) {
return false;
}
- if (!grpc_header_key_is_legal(key1, strlen(key1))) {
+ if (!grpc_header_key_is_legal(grpc_slice_from_static_string(key1))) {
return false;
}
inner_array_hash = Z_ARRVAL_P(inner_array);
@@ -172,9 +172,8 @@ bool create_metadata_array(zval *array, grpc_metadata_array *metadata) {
if (Z_TYPE_P(value) != IS_STRING) {
return false;
}
- metadata->metadata[metadata->count].key = key1;
- metadata->metadata[metadata->count].value = Z_STRVAL_P(value);
- metadata->metadata[metadata->count].value_length = Z_STRLEN_P(value);
+ metadata->metadata[metadata->count].key = grpc_slice_from_copied_string(key1);
+ metadata->metadata[metadata->count].value = grpc_slice_from_copied_buffer(Z_STRVAL_P(value), Z_STRLEN_P(value));
metadata->count += 1;
PHP_GRPC_HASH_FOREACH_END()
PHP_GRPC_HASH_FOREACH_END()
@@ -229,10 +228,15 @@ PHP_METHOD(Call, __construct) {
}
add_property_zval(getThis(), "channel", channel_obj);
wrapped_grpc_timeval *deadline = Z_WRAPPED_GRPC_TIMEVAL_P(deadline_obj);
+ grpc_slice method_slice = grpc_slice_from_copied_string(method);
+ grpc_slice host_slice = host_override != NULL ?
+ grpc_slice_from_copied_string(host_override) : grpc_empty_slice();
call->wrapped =
grpc_channel_create_call(channel->wrapped, NULL, GRPC_PROPAGATE_DEFAULTS,
- completion_queue, method, host_override,
+ completion_queue, method_slice, host_override != NULL ? &host_slice : NULL,
deadline->wrapped, NULL);
+ grpc_slice_unref(method_slice);
+ grpc_slice_unref(host_slice);
call->owned = true;
}
@@ -267,8 +271,8 @@ PHP_METHOD(Call, startBatch) {
grpc_metadata_array recv_metadata;
grpc_metadata_array recv_trailing_metadata;
grpc_status_code status;
- char *status_details = NULL;
- size_t status_details_capacity = 0;
+ grpc_slice recv_status_details = grpc_empty_slice();
+ grpc_slice send_status_details = grpc_empty_slice();
grpc_byte_buffer *message;
int cancelled;
grpc_call_error error;
@@ -380,8 +384,8 @@ PHP_METHOD(Call, startBatch) {
1 TSRMLS_CC);
goto cleanup;
}
- ops[op_num].data.send_status_from_server.status_details =
- Z_STRVAL_P(inner_value);
+ send_status_details = grpc_slice_from_copied_string(Z_STRVAL_P(inner_value));
+ ops[op_num].data.send_status_from_server.status_details = &send_status_details;
} else {
zend_throw_exception(spl_ce_InvalidArgumentException,
"String status details is required",
@@ -400,9 +404,7 @@ PHP_METHOD(Call, startBatch) {
&recv_trailing_metadata;
ops[op_num].data.recv_status_on_client.status = &status;
ops[op_num].data.recv_status_on_client.status_details =
- &status_details;
- ops[op_num].data.recv_status_on_client.status_details_capacity =
- &status_details_capacity;
+ &recv_status_details;
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
ops[op_num].data.recv_close_on_server.cancelled = &cancelled;
@@ -474,8 +476,10 @@ PHP_METHOD(Call, startBatch) {
#endif
PHP_GRPC_DELREF(array);
add_property_long(recv_status, "code", status);
- php_grpc_add_property_string(recv_status, "details", status_details,
+ char *status_details_text = grpc_slice_to_c_string(recv_status_details);
+ php_grpc_add_property_string(recv_status, "details", status_details_text,
true);
+ gpr_free(status_details_text);
add_property_zval(result, "status", recv_status);
PHP_GRPC_DELREF(recv_status);
PHP_GRPC_FREE_STD_ZVAL(recv_status);
@@ -493,9 +497,8 @@ cleanup:
grpc_metadata_array_destroy(&trailing_metadata);
grpc_metadata_array_destroy(&recv_metadata);
grpc_metadata_array_destroy(&recv_trailing_metadata);
- if (status_details != NULL) {
- gpr_free(status_details);
- }
+ grpc_slice_unref(recv_status_details);
+ grpc_slice_unref(send_status_details);
for (int i = 0; i < op_num; i++) {
if (ops[i].op == GRPC_OP_SEND_MESSAGE) {
grpc_byte_buffer_destroy(ops[i].data.send_message);
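The status-details fields in the batch ops are slices as well now: details sent from the server are wrapped with `grpc_slice_from_copied_string`, and details received on the client arrive as a slice that must be converted and released, as the startBatch and cleanup hunks above do. A minimal sketch of the receive side, with the surrounding batch machinery elided:

```c
#include <grpc/grpc.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>

/* Sketch only: wire up GRPC_OP_RECV_STATUS_ON_CLIENT with slice-typed
 * details, then convert and release once the batch has completed. */
static void recv_status_sketch(grpc_op *op, grpc_status_code *status,
                               grpc_metadata_array *trailing_md) {
  grpc_slice details = grpc_empty_slice();
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->flags = 0;
  op->reserved = NULL;
  op->data.recv_status_on_client.trailing_metadata = trailing_md;
  op->data.recv_status_on_client.status = status;
  op->data.recv_status_on_client.status_details = &details;
  /* ... grpc_call_start_batch(...) and wait for the batch to complete ... */
  char *text = grpc_slice_to_c_string(details); /* NUL-terminated copy */
  /* use `text`, then release both the copy and the slice */
  gpr_free(text);
  grpc_slice_unref(details);
}
```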
diff --git a/src/php/ext/grpc/server.c b/src/php/ext/grpc/server.c
index 2217a4f9a8..9ac5d2a3c3 100644
--- a/src/php/ext/grpc/server.c
+++ b/src/php/ext/grpc/server.c
@@ -49,6 +49,8 @@
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
+#include <grpc/slice.h>
+#include <grpc/support/alloc.h>
#include "completion_queue.h"
#include "server.h"
@@ -149,8 +151,12 @@ PHP_METHOD(Server, requestCall) {
1 TSRMLS_CC);
goto cleanup;
}
- php_grpc_add_property_string(result, "method", details.method, true);
- php_grpc_add_property_string(result, "host", details.host, true);
+ char *method_text = grpc_slice_to_c_string(details.method);
+ char *host_text = grpc_slice_to_c_string(details.host);
+ php_grpc_add_property_string(result, "method", method_text, true);
+ php_grpc_add_property_string(result, "host", host_text, true);
+ gpr_free(method_text);
+ gpr_free(host_text);
#if PHP_MAJOR_VERSION < 7
add_property_zval(result, "call", grpc_php_wrap_call(call, true TSRMLS_CC));
add_property_zval(result, "absolute_deadline",
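`grpc_call_details.method` and `.host` are likewise slices now, which is why requestCall converts them with `grpc_slice_to_c_string` and frees the copies. A hedged equivalent in plain C (the function name is illustrative):

```c
#include <stdio.h>

#include <grpc/grpc.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>

/* Sketch only: read the slice-typed method/host out of grpc_call_details. */
static void log_call_details(const grpc_call_details *details) {
  char *method = grpc_slice_to_c_string(details->method);
  char *host = grpc_slice_to_c_string(details->host);
  fprintf(stderr, "incoming call: method=%s host=%s\n", method, host);
  gpr_free(method);
  gpr_free(host);
}
```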
diff --git a/src/proto/grpc/testing/BUILD b/src/proto/grpc/testing/BUILD
index f9f9cbceaf..283740839d 100644
--- a/src/proto/grpc/testing/BUILD
+++ b/src/proto/grpc/testing/BUILD
@@ -1,3 +1,33 @@
+# Copyright 2017, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+licenses(["notice"]) # 3-clause BSD
package(default_visibility = ["//visibility:public"])
diff --git a/src/proto/grpc/testing/duplicate/BUILD b/src/proto/grpc/testing/duplicate/BUILD
index 255e699bec..8fc5a96af4 100644
--- a/src/proto/grpc/testing/duplicate/BUILD
+++ b/src/proto/grpc/testing/duplicate/BUILD
@@ -1,3 +1,33 @@
+# Copyright 2017, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+licenses(["notice"]) # 3-clause BSD
package(default_visibility = ["//visibility:public"])
diff --git a/src/python/grpcio/_spawn_patch.py b/src/python/grpcio/_spawn_patch.py
index 24306f0dd9..75d0a8b352 100644
--- a/src/python/grpcio/_spawn_patch.py
+++ b/src/python/grpcio/_spawn_patch.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Patches the spawn() command for windows compilers.
Windows has an 8191 character command line limit, but some compilers
@@ -45,29 +44,32 @@ MAX_COMMAND_LENGTH = 8191
_classic_spawn = ccompiler.CCompiler.spawn
+
def _commandfile_spawn(self, command):
- command_length = sum([len(arg) for arg in command])
- if os.name == 'nt' and command_length > MAX_COMMAND_LENGTH:
- # Even if this command doesn't support the @command_file, it will
- # fail as is so we try blindly
- print('Command line length exceeded, using command file')
- print(' '.join(command))
- temporary_directory = tempfile.mkdtemp()
- command_filename = os.path.abspath(
- os.path.join(temporary_directory, 'command'))
- with open(command_filename, 'w') as command_file:
- escaped_args = ['"' + arg.replace('\\', '\\\\') + '"' for arg in command[1:]]
- command_file.write(' '.join(escaped_args))
- modified_command = command[:1] + ['@{}'.format(command_filename)]
- try:
- _classic_spawn(self, modified_command)
- finally:
- shutil.rmtree(temporary_directory)
- else:
- _classic_spawn(self, command)
+ command_length = sum([len(arg) for arg in command])
+ if os.name == 'nt' and command_length > MAX_COMMAND_LENGTH:
+ # Even if this command doesn't support the @command_file, it will
+ # fail as is so we try blindly
+ print('Command line length exceeded, using command file')
+ print(' '.join(command))
+ temporary_directory = tempfile.mkdtemp()
+ command_filename = os.path.abspath(
+ os.path.join(temporary_directory, 'command'))
+ with open(command_filename, 'w') as command_file:
+ escaped_args = [
+ '"' + arg.replace('\\', '\\\\') + '"' for arg in command[1:]
+ ]
+ command_file.write(' '.join(escaped_args))
+ modified_command = command[:1] + ['@{}'.format(command_filename)]
+ try:
+ _classic_spawn(self, modified_command)
+ finally:
+ shutil.rmtree(temporary_directory)
+ else:
+ _classic_spawn(self, command)
def monkeypatch_spawn():
- """Monkeypatching is dumb, but it's either that or we become maintainers of
+ """Monkeypatching is dumb, but it's either that or we become maintainers of
something much, much bigger."""
- ccompiler.CCompiler.spawn = _commandfile_spawn
+ ccompiler.CCompiler.spawn = _commandfile_spawn
diff --git a/src/python/grpcio/commands.py b/src/python/grpcio/commands.py
index 701c6af017..e09f922591 100644
--- a/src/python/grpcio/commands.py
+++ b/src/python/grpcio/commands.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Provides distutils command classes for the GRPC Python setup process."""
import distutils
@@ -87,138 +86,144 @@ Glossary
class CommandError(Exception):
- """Simple exception class for GRPC custom commands."""
+ """Simple exception class for GRPC custom commands."""
# TODO(atash): Remove this once PyPI has better Linux bdist support. See
# https://bitbucket.org/pypa/pypi/issues/120/binary-wheels-for-linux-are-not-supported
def _get_grpc_custom_bdist(decorated_basename, target_bdist_basename):
- """Returns a string path to a bdist file for Linux to install.
+ """Returns a string path to a bdist file for Linux to install.
If we can retrieve a pre-compiled bdist from online, uses it. Else, emits a
warning and builds from source.
"""
- # TODO(atash): somehow the name that's returned from `wheel` is different
- # between different versions of 'wheel' (but from a compatibility standpoint,
- # the names are compatible); we should have some way of determining name
- # compatibility in the same way `wheel` does to avoid having to rename all of
- # the custom wheels that we build/upload to GCS.
-
- # Break import style to ensure that setup.py has had a chance to install the
- # relevant package.
- from six.moves.urllib import request
- decorated_path = decorated_basename + GRPC_CUSTOM_BDIST_EXT
- try:
- url = BINARIES_REPOSITORY + '/{target}'.format(target=decorated_path)
- bdist_data = request.urlopen(url).read()
- except IOError as error:
- raise CommandError(
- '{}\n\nCould not find the bdist {}: {}'
- .format(traceback.format_exc(), decorated_path, error.message))
- # Our chosen local bdist path.
- bdist_path = target_bdist_basename + GRPC_CUSTOM_BDIST_EXT
- try:
- with open(bdist_path, 'w') as bdist_file:
- bdist_file.write(bdist_data)
- except IOError as error:
- raise CommandError(
- '{}\n\nCould not write grpcio bdist: {}'
- .format(traceback.format_exc(), error.message))
- return bdist_path
+ # TODO(atash): somehow the name that's returned from `wheel` is different
+ # between different versions of 'wheel' (but from a compatibility standpoint,
+ # the names are compatible); we should have some way of determining name
+ # compatibility in the same way `wheel` does to avoid having to rename all of
+ # the custom wheels that we build/upload to GCS.
+
+ # Break import style to ensure that setup.py has had a chance to install the
+ # relevant package.
+ from six.moves.urllib import request
+ decorated_path = decorated_basename + GRPC_CUSTOM_BDIST_EXT
+ try:
+ url = BINARIES_REPOSITORY + '/{target}'.format(target=decorated_path)
+ bdist_data = request.urlopen(url).read()
+ except IOError as error:
+ raise CommandError('{}\n\nCould not find the bdist {}: {}'.format(
+ traceback.format_exc(), decorated_path, error.message))
+ # Our chosen local bdist path.
+ bdist_path = target_bdist_basename + GRPC_CUSTOM_BDIST_EXT
+ try:
+ with open(bdist_path, 'w') as bdist_file:
+ bdist_file.write(bdist_data)
+ except IOError as error:
+ raise CommandError('{}\n\nCould not write grpcio bdist: {}'
+ .format(traceback.format_exc(), error.message))
+ return bdist_path
class SphinxDocumentation(setuptools.Command):
- """Command to generate documentation via sphinx."""
-
- description = 'generate sphinx documentation'
- user_options = []
-
- def initialize_options(self):
- pass
-
- def finalize_options(self):
- pass
-
- def run(self):
- # We import here to ensure that setup.py has had a chance to install the
- # relevant package eggs first.
- import sphinx
- import sphinx.apidoc
- metadata = self.distribution.metadata
- src_dir = os.path.join(PYTHON_STEM, 'grpc')
- sys.path.append(src_dir)
- sphinx.apidoc.main([
- '', '--force', '--full', '-H', metadata.name, '-A', metadata.author,
- '-V', metadata.version, '-R', metadata.version,
- '-o', os.path.join('doc', 'src'), src_dir])
- conf_filepath = os.path.join('doc', 'src', 'conf.py')
- with open(conf_filepath, 'a') as conf_file:
- conf_file.write(CONF_PY_ADDENDUM)
- glossary_filepath = os.path.join('doc', 'src', 'grpc.rst')
- with open(glossary_filepath, 'a') as glossary_filepath:
- glossary_filepath.write(API_GLOSSARY)
- sphinx.main(['', os.path.join('doc', 'src'), os.path.join('doc', 'build')])
+ """Command to generate documentation via sphinx."""
+
+ description = 'generate sphinx documentation'
+ user_options = []
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ # We import here to ensure that setup.py has had a chance to install the
+ # relevant package eggs first.
+ import sphinx
+ import sphinx.apidoc
+ metadata = self.distribution.metadata
+ src_dir = os.path.join(PYTHON_STEM, 'grpc')
+ sys.path.append(src_dir)
+ sphinx.apidoc.main([
+ '', '--force', '--full', '-H', metadata.name, '-A', metadata.author,
+ '-V', metadata.version, '-R', metadata.version, '-o',
+ os.path.join('doc', 'src'), src_dir
+ ])
+ conf_filepath = os.path.join('doc', 'src', 'conf.py')
+ with open(conf_filepath, 'a') as conf_file:
+ conf_file.write(CONF_PY_ADDENDUM)
+ glossary_filepath = os.path.join('doc', 'src', 'grpc.rst')
+ with open(glossary_filepath, 'a') as glossary_filepath:
+ glossary_filepath.write(API_GLOSSARY)
+ sphinx.main(
+ ['', os.path.join('doc', 'src'), os.path.join('doc', 'build')])
class BuildProjectMetadata(setuptools.Command):
- """Command to generate project metadata in a module."""
+ """Command to generate project metadata in a module."""
- description = 'build grpcio project metadata files'
- user_options = []
+ description = 'build grpcio project metadata files'
+ user_options = []
- def initialize_options(self):
- pass
+ def initialize_options(self):
+ pass
- def finalize_options(self):
- pass
+ def finalize_options(self):
+ pass
- def run(self):
- with open(os.path.join(PYTHON_STEM, 'grpc/_grpcio_metadata.py'), 'w') as module_file:
- module_file.write('__version__ = """{}"""'.format(
- self.distribution.get_version()))
+ def run(self):
+ with open(os.path.join(PYTHON_STEM, 'grpc/_grpcio_metadata.py'),
+ 'w') as module_file:
+ module_file.write('__version__ = """{}"""'.format(
+ self.distribution.get_version()))
class BuildPy(build_py.build_py):
- """Custom project build command."""
+ """Custom project build command."""
- def run(self):
- self.run_command('build_project_metadata')
- build_py.build_py.run(self)
+ def run(self):
+ self.run_command('build_project_metadata')
+ build_py.build_py.run(self)
def _poison_extensions(extensions, message):
- """Includes a file that will always fail to compile in all extensions."""
- poison_filename = os.path.join(PYTHON_STEM, 'poison.c')
- with open(poison_filename, 'w') as poison:
- poison.write('#error {}'.format(message))
- for extension in extensions:
- extension.sources = [poison_filename]
+ """Includes a file that will always fail to compile in all extensions."""
+ poison_filename = os.path.join(PYTHON_STEM, 'poison.c')
+ with open(poison_filename, 'w') as poison:
+ poison.write('#error {}'.format(message))
+ for extension in extensions:
+ extension.sources = [poison_filename]
+
def check_and_update_cythonization(extensions):
- """Replace .pyx files with their generated counterparts and return whether or
+ """Replace .pyx files with their generated counterparts and return whether or
not cythonization still needs to occur."""
- for extension in extensions:
- generated_pyx_sources = []
- other_sources = []
- for source in extension.sources:
- base, file_ext = os.path.splitext(source)
- if file_ext == '.pyx':
- generated_pyx_source = next(
- (base + gen_ext for gen_ext in ('.c', '.cpp',)
- if os.path.isfile(base + gen_ext)), None)
- if generated_pyx_source:
- generated_pyx_sources.append(generated_pyx_source)
- else:
- sys.stderr.write('Cython-generated files are missing...\n')
- return False
- else:
- other_sources.append(source)
- extension.sources = generated_pyx_sources + other_sources
- sys.stderr.write('Found cython-generated files...\n')
- return True
+ for extension in extensions:
+ generated_pyx_sources = []
+ other_sources = []
+ for source in extension.sources:
+ base, file_ext = os.path.splitext(source)
+ if file_ext == '.pyx':
+ generated_pyx_source = next((base + gen_ext
+ for gen_ext in (
+ '.c',
+ '.cpp',)
+ if os.path.isfile(base + gen_ext)),
+ None)
+ if generated_pyx_source:
+ generated_pyx_sources.append(generated_pyx_source)
+ else:
+ sys.stderr.write('Cython-generated files are missing...\n')
+ return False
+ else:
+ other_sources.append(source)
+ extension.sources = generated_pyx_sources + other_sources
+ sys.stderr.write('Found cython-generated files...\n')
+ return True
+
def try_cythonize(extensions, linetracing=False, mandatory=True):
- """Attempt to cythonize the extensions.
+ """Attempt to cythonize the extensions.
Args:
extensions: A list of `distutils.extension.Extension`.
@@ -226,78 +231,83 @@ def try_cythonize(extensions, linetracing=False, mandatory=True):
mandatory: Whether or not having Cython-generated files is mandatory. If it
is, extensions will be poisoned when they can't be fully generated.
"""
- try:
- # Break import style to ensure we have access to Cython post-setup_requires
- import Cython.Build
- except ImportError:
- if mandatory:
- sys.stderr.write(
- "This package needs to generate C files with Cython but it cannot. "
- "Poisoning extension sources to disallow extension commands...")
- _poison_extensions(
- extensions,
- "Extensions have been poisoned due to missing Cython-generated code.")
- return extensions
- cython_compiler_directives = {}
- if linetracing:
- additional_define_macros = [('CYTHON_TRACE_NOGIL', '1')]
- cython_compiler_directives['linetrace'] = True
- return Cython.Build.cythonize(
- extensions,
- include_path=[
- include_dir for extension in extensions for include_dir in extension.include_dirs
- ] + [CYTHON_STEM],
- compiler_directives=cython_compiler_directives
- )
+ try:
+ # Break import style to ensure we have access to Cython post-setup_requires
+ import Cython.Build
+ except ImportError:
+ if mandatory:
+ sys.stderr.write(
+ "This package needs to generate C files with Cython but it cannot. "
+ "Poisoning extension sources to disallow extension commands...")
+ _poison_extensions(
+ extensions,
+ "Extensions have been poisoned due to missing Cython-generated code."
+ )
+ return extensions
+ cython_compiler_directives = {}
+ if linetracing:
+ additional_define_macros = [('CYTHON_TRACE_NOGIL', '1')]
+ cython_compiler_directives['linetrace'] = True
+ return Cython.Build.cythonize(
+ extensions,
+ include_path=[
+ include_dir
+ for extension in extensions
+ for include_dir in extension.include_dirs
+ ] + [CYTHON_STEM],
+ compiler_directives=cython_compiler_directives)
class BuildExt(build_ext.build_ext):
- """Custom build_ext command to enable compiler-specific flags."""
-
- C_OPTIONS = {
- 'unix': ('-pthread', '-std=gnu99'),
- 'msvc': (),
- }
- LINK_OPTIONS = {}
-
- def build_extensions(self):
- compiler = self.compiler.compiler_type
- if compiler in BuildExt.C_OPTIONS:
- for extension in self.extensions:
- extension.extra_compile_args += list(BuildExt.C_OPTIONS[compiler])
- if compiler in BuildExt.LINK_OPTIONS:
- for extension in self.extensions:
- extension.extra_link_args += list(BuildExt.LINK_OPTIONS[compiler])
- if not check_and_update_cythonization(self.extensions):
- self.extensions = try_cythonize(self.extensions)
- try:
- build_ext.build_ext.build_extensions(self)
- except Exception as error:
- formatted_exception = traceback.format_exc()
- support.diagnose_build_ext_error(self, error, formatted_exception)
- raise CommandError(
- "Failed `build_ext` step:\n{}".format(formatted_exception))
+ """Custom build_ext command to enable compiler-specific flags."""
+
+ C_OPTIONS = {
+ 'unix': ('-pthread', '-std=gnu99'),
+ 'msvc': (),
+ }
+ LINK_OPTIONS = {}
+
+ def build_extensions(self):
+ compiler = self.compiler.compiler_type
+ if compiler in BuildExt.C_OPTIONS:
+ for extension in self.extensions:
+ extension.extra_compile_args += list(BuildExt.C_OPTIONS[
+ compiler])
+ if compiler in BuildExt.LINK_OPTIONS:
+ for extension in self.extensions:
+ extension.extra_link_args += list(BuildExt.LINK_OPTIONS[
+ compiler])
+ if not check_and_update_cythonization(self.extensions):
+ self.extensions = try_cythonize(self.extensions)
+ try:
+ build_ext.build_ext.build_extensions(self)
+ except Exception as error:
+ formatted_exception = traceback.format_exc()
+ support.diagnose_build_ext_error(self, error, formatted_exception)
+ raise CommandError("Failed `build_ext` step:\n{}".format(
+ formatted_exception))
class Gather(setuptools.Command):
- """Command to gather project dependencies."""
-
- description = 'gather dependencies for grpcio'
- user_options = [
- ('test', 't', 'flag indicating to gather test dependencies'),
- ('install', 'i', 'flag indicating to gather install dependencies')
- ]
-
- def initialize_options(self):
- self.test = False
- self.install = False
-
- def finalize_options(self):
- # distutils requires this override.
- pass
-
- def run(self):
- if self.install and self.distribution.install_requires:
- self.distribution.fetch_build_eggs(self.distribution.install_requires)
- if self.test and self.distribution.tests_require:
- self.distribution.fetch_build_eggs(self.distribution.tests_require)
+ """Command to gather project dependencies."""
+
+ description = 'gather dependencies for grpcio'
+ user_options = [
+ ('test', 't', 'flag indicating to gather test dependencies'),
+ ('install', 'i', 'flag indicating to gather install dependencies')
+ ]
+
+ def initialize_options(self):
+ self.test = False
+ self.install = False
+
+ def finalize_options(self):
+ # distutils requires this override.
+ pass
+
+ def run(self):
+ if self.install and self.distribution.install_requires:
+ self.distribution.fetch_build_eggs(
+ self.distribution.install_requires)
+ if self.test and self.distribution.tests_require:
+ self.distribution.fetch_build_eggs(self.distribution.tests_require)
diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py
index e3c10156d0..fe29971799 100644
--- a/src/python/grpcio/grpc/__init__.py
+++ b/src/python/grpcio/grpc/__init__.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""gRPC's Python API."""
import abc
@@ -37,28 +36,27 @@ import six
from grpc._cython import cygrpc as _cygrpc
-
############################## Future Interface ###############################
class FutureTimeoutError(Exception):
- """Indicates that a method call on a Future timed out."""
+ """Indicates that a method call on a Future timed out."""
class FutureCancelledError(Exception):
- """Indicates that the computation underlying a Future was cancelled."""
+ """Indicates that the computation underlying a Future was cancelled."""
class Future(six.with_metaclass(abc.ABCMeta)):
- """A representation of a computation in another control flow.
+ """A representation of a computation in another control flow.
Computations represented by a Future may be yet to be begun, may be ongoing,
or may have already completed.
"""
- @abc.abstractmethod
- def cancel(self):
- """Attempts to cancel the computation.
+ @abc.abstractmethod
+ def cancel(self):
+ """Attempts to cancel the computation.
This method does not block.
@@ -71,11 +69,11 @@ class Future(six.with_metaclass(abc.ABCMeta)):
remote system for which a determination of whether or not it commenced
before being cancelled cannot be made without blocking.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def cancelled(self):
- """Describes whether the computation was cancelled.
+ @abc.abstractmethod
+ def cancelled(self):
+ """Describes whether the computation was cancelled.
This method does not block.
@@ -85,11 +83,11 @@ class Future(six.with_metaclass(abc.ABCMeta)):
not limited to this object's cancel method not having been called and
the computation's result having become immediately available.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def running(self):
- """Describes whether the computation is taking place.
+ @abc.abstractmethod
+ def running(self):
+ """Describes whether the computation is taking place.
This method does not block.
@@ -98,11 +96,11 @@ class Future(six.with_metaclass(abc.ABCMeta)):
taking place now, or False if the computation took place in the past or
was cancelled.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def done(self):
- """Describes whether the computation has taken place.
+ @abc.abstractmethod
+ def done(self):
+ """Describes whether the computation has taken place.
This method does not block.
@@ -111,11 +109,11 @@ class Future(six.with_metaclass(abc.ABCMeta)):
unscheduled or interrupted. False if the computation may possibly be
executing or scheduled to execute later.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def result(self, timeout=None):
- """Accesses the outcome of the computation or raises its exception.
+ @abc.abstractmethod
+ def result(self, timeout=None):
+ """Accesses the outcome of the computation or raises its exception.
This method may return immediately or may block.
@@ -134,11 +132,11 @@ class Future(six.with_metaclass(abc.ABCMeta)):
Exception: If the computation raised an exception, this call will raise
the same exception.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def exception(self, timeout=None):
- """Return the exception raised by the computation.
+ @abc.abstractmethod
+ def exception(self, timeout=None):
+ """Return the exception raised by the computation.
This method may return immediately or may block.
@@ -157,11 +155,11 @@ class Future(six.with_metaclass(abc.ABCMeta)):
not terminate within the allotted time.
FutureCancelledError: If the computation was cancelled.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def traceback(self, timeout=None):
- """Access the traceback of the exception raised by the computation.
+ @abc.abstractmethod
+ def traceback(self, timeout=None):
+ """Access the traceback of the exception raised by the computation.
This method may return immediately or may block.
@@ -180,11 +178,11 @@ class Future(six.with_metaclass(abc.ABCMeta)):
not terminate within the allotted time.
FutureCancelledError: If the computation was cancelled.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def add_done_callback(self, fn):
- """Adds a function to be called at completion of the computation.
+ @abc.abstractmethod
+ def add_done_callback(self, fn):
+ """Adds a function to be called at completion of the computation.
The callback will be passed this Future object describing the outcome of
the computation.
@@ -195,7 +193,7 @@ class Future(six.with_metaclass(abc.ABCMeta)):
Args:
fn: A callable taking this Future object as its single parameter.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
################################ gRPC Enums ##################################
@@ -203,7 +201,7 @@ class Future(six.with_metaclass(abc.ABCMeta)):
@enum.unique
class ChannelConnectivity(enum.Enum):
- """Mirrors grpc_connectivity_state in the gRPC Core.
+ """Mirrors grpc_connectivity_state in the gRPC Core.
Attributes:
IDLE: The channel is idle.
@@ -213,81 +211,80 @@ class ChannelConnectivity(enum.Enum):
recover.
SHUTDOWN: The channel has seen a failure from which it cannot recover.
"""
- IDLE = (_cygrpc.ConnectivityState.idle, 'idle')
- CONNECTING = (_cygrpc.ConnectivityState.connecting, 'connecting')
- READY = (_cygrpc.ConnectivityState.ready, 'ready')
- TRANSIENT_FAILURE = (
- _cygrpc.ConnectivityState.transient_failure, 'transient failure')
- SHUTDOWN = (_cygrpc.ConnectivityState.shutdown, 'shutdown')
+ IDLE = (_cygrpc.ConnectivityState.idle, 'idle')
+ CONNECTING = (_cygrpc.ConnectivityState.connecting, 'connecting')
+ READY = (_cygrpc.ConnectivityState.ready, 'ready')
+ TRANSIENT_FAILURE = (_cygrpc.ConnectivityState.transient_failure,
+ 'transient failure')
+ SHUTDOWN = (_cygrpc.ConnectivityState.shutdown, 'shutdown')
@enum.unique
class StatusCode(enum.Enum):
- """Mirrors grpc_status_code in the gRPC Core."""
- OK = (_cygrpc.StatusCode.ok, 'ok')
- CANCELLED = (_cygrpc.StatusCode.cancelled, 'cancelled')
- UNKNOWN = (_cygrpc.StatusCode.unknown, 'unknown')
- INVALID_ARGUMENT = (
- _cygrpc.StatusCode.invalid_argument, 'invalid argument')
- DEADLINE_EXCEEDED = (
- _cygrpc.StatusCode.deadline_exceeded, 'deadline exceeded')
- NOT_FOUND = (_cygrpc.StatusCode.not_found, 'not found')
- ALREADY_EXISTS = (_cygrpc.StatusCode.already_exists, 'already exists')
- PERMISSION_DENIED = (
- _cygrpc.StatusCode.permission_denied, 'permission denied')
- RESOURCE_EXHAUSTED = (
- _cygrpc.StatusCode.resource_exhausted, 'resource exhausted')
- FAILED_PRECONDITION = (
- _cygrpc.StatusCode.failed_precondition, 'failed precondition')
- ABORTED = (_cygrpc.StatusCode.aborted, 'aborted')
- OUT_OF_RANGE = (_cygrpc.StatusCode.out_of_range, 'out of range')
- UNIMPLEMENTED = (_cygrpc.StatusCode.unimplemented, 'unimplemented')
- INTERNAL = (_cygrpc.StatusCode.internal, 'internal')
- UNAVAILABLE = (_cygrpc.StatusCode.unavailable, 'unavailable')
- DATA_LOSS = (_cygrpc.StatusCode.data_loss, 'data loss')
- UNAUTHENTICATED = (_cygrpc.StatusCode.unauthenticated, 'unauthenticated')
+ """Mirrors grpc_status_code in the gRPC Core."""
+ OK = (_cygrpc.StatusCode.ok, 'ok')
+ CANCELLED = (_cygrpc.StatusCode.cancelled, 'cancelled')
+ UNKNOWN = (_cygrpc.StatusCode.unknown, 'unknown')
+ INVALID_ARGUMENT = (_cygrpc.StatusCode.invalid_argument, 'invalid argument')
+ DEADLINE_EXCEEDED = (_cygrpc.StatusCode.deadline_exceeded,
+ 'deadline exceeded')
+ NOT_FOUND = (_cygrpc.StatusCode.not_found, 'not found')
+ ALREADY_EXISTS = (_cygrpc.StatusCode.already_exists, 'already exists')
+ PERMISSION_DENIED = (_cygrpc.StatusCode.permission_denied,
+ 'permission denied')
+ RESOURCE_EXHAUSTED = (_cygrpc.StatusCode.resource_exhausted,
+ 'resource exhausted')
+ FAILED_PRECONDITION = (_cygrpc.StatusCode.failed_precondition,
+ 'failed precondition')
+ ABORTED = (_cygrpc.StatusCode.aborted, 'aborted')
+ OUT_OF_RANGE = (_cygrpc.StatusCode.out_of_range, 'out of range')
+ UNIMPLEMENTED = (_cygrpc.StatusCode.unimplemented, 'unimplemented')
+ INTERNAL = (_cygrpc.StatusCode.internal, 'internal')
+ UNAVAILABLE = (_cygrpc.StatusCode.unavailable, 'unavailable')
+ DATA_LOSS = (_cygrpc.StatusCode.data_loss, 'data loss')
+ UNAUTHENTICATED = (_cygrpc.StatusCode.unauthenticated, 'unauthenticated')
############################# gRPC Exceptions ################################
class RpcError(Exception):
- """Raised by the gRPC library to indicate non-OK-status RPC termination."""
+ """Raised by the gRPC library to indicate non-OK-status RPC termination."""
############################## Shared Context ################################
class RpcContext(six.with_metaclass(abc.ABCMeta)):
- """Provides RPC-related information and action."""
+ """Provides RPC-related information and action."""
- @abc.abstractmethod
- def is_active(self):
- """Describes whether the RPC is active or has terminated."""
- raise NotImplementedError()
+ @abc.abstractmethod
+ def is_active(self):
+ """Describes whether the RPC is active or has terminated."""
+ raise NotImplementedError()
- @abc.abstractmethod
- def time_remaining(self):
- """Describes the length of allowed time remaining for the RPC.
+ @abc.abstractmethod
+ def time_remaining(self):
+ """Describes the length of allowed time remaining for the RPC.
Returns:
A nonnegative float indicating the length of allowed time in seconds
remaining for the RPC to complete before it is considered to have timed
out, or None if no deadline was specified for the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def cancel(self):
- """Cancels the RPC.
+ @abc.abstractmethod
+ def cancel(self):
+ """Cancels the RPC.
Idempotent and has no effect if the RPC has already terminated.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def add_callback(self, callback):
- """Registers a callback to be called on RPC termination.
+ @abc.abstractmethod
+ def add_callback(self, callback):
+ """Registers a callback to be called on RPC termination.
Args:
callback: A no-parameter callable to be called on RPC termination.
@@ -297,76 +294,76 @@ class RpcContext(six.with_metaclass(abc.ABCMeta)):
callback was not added and will not later be called (because the RPC
already terminated or some other reason).
"""
- raise NotImplementedError()
+ raise NotImplementedError()
######################### Invocation-Side Context ############################
class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
- """Invocation-side utility object for an RPC."""
+ """Invocation-side utility object for an RPC."""
- @abc.abstractmethod
- def initial_metadata(self):
- """Accesses the initial metadata from the service-side of the RPC.
+ @abc.abstractmethod
+ def initial_metadata(self):
+ """Accesses the initial metadata from the service-side of the RPC.
This method blocks until the value is available.
Returns:
The initial :term:`metadata`.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def trailing_metadata(self):
- """Accesses the trailing metadata from the service-side of the RPC.
+ @abc.abstractmethod
+ def trailing_metadata(self):
+ """Accesses the trailing metadata from the service-side of the RPC.
This method blocks until the value is available.
Returns:
The trailing :term:`metadata`.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def code(self):
- """Accesses the status code emitted by the service-side of the RPC.
+ @abc.abstractmethod
+ def code(self):
+ """Accesses the status code emitted by the service-side of the RPC.
This method blocks until the value is available.
Returns:
The StatusCode value for the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def details(self):
- """Accesses the details value emitted by the service-side of the RPC.
+ @abc.abstractmethod
+ def details(self):
+ """Accesses the details value emitted by the service-side of the RPC.
This method blocks until the value is available.
Returns:
The details string of the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
############ Authentication & Authorization Interfaces & Classes #############
class ChannelCredentials(object):
- """A value encapsulating the data required to create a secure Channel.
+ """A value encapsulating the data required to create a secure Channel.
This class has no supported interface - it exists to define the type of its
instances and its instances exist to be passed to other functions.
"""
- def __init__(self, credentials):
- self._credentials = credentials
+ def __init__(self, credentials):
+ self._credentials = credentials
class CallCredentials(object):
- """A value encapsulating data asserting an identity over a channel.
+ """A value encapsulating data asserting an identity over a channel.
A CallCredentials may be composed with ChannelCredentials to always assert
identity for every call over that Channel.
@@ -375,12 +372,12 @@ class CallCredentials(object):
instances and its instances exist to be passed to other functions.
"""
- def __init__(self, credentials):
- self._credentials = credentials
+ def __init__(self, credentials):
+ self._credentials = credentials
class AuthMetadataContext(six.with_metaclass(abc.ABCMeta)):
- """Provides information to call credentials metadata plugins.
+ """Provides information to call credentials metadata plugins.
Attributes:
service_url: A string URL of the service being called into.
@@ -389,23 +386,23 @@ class AuthMetadataContext(six.with_metaclass(abc.ABCMeta)):
class AuthMetadataPluginCallback(six.with_metaclass(abc.ABCMeta)):
- """Callback object received by a metadata plugin."""
+ """Callback object received by a metadata plugin."""
- def __call__(self, metadata, error):
- """Inform the gRPC runtime of the metadata to construct a CallCredentials.
+ def __call__(self, metadata, error):
+ """Inform the gRPC runtime of the metadata to construct a CallCredentials.
Args:
metadata: The :term:`metadata` used to construct the CallCredentials.
error: An Exception to indicate error or None to indicate success.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class AuthMetadataPlugin(six.with_metaclass(abc.ABCMeta)):
- """A specification for custom authentication."""
+ """A specification for custom authentication."""
- def __call__(self, context, callback):
- """Implements authentication by passing metadata to a callback.
+ def __call__(self, context, callback):
+ """Implements authentication by passing metadata to a callback.
Implementations of this method must not block.
@@ -415,29 +412,29 @@ class AuthMetadataPlugin(six.with_metaclass(abc.ABCMeta)):
callback: An AuthMetadataPluginCallback to be invoked either synchronously
or asynchronously.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class ServerCredentials(object):
- """A value encapsulating the data required to open a secure port on a Server.
+ """A value encapsulating the data required to open a secure port on a Server.
This class has no supported interface - it exists to define the type of its
instances and its instances exist to be passed to other functions.
"""
- def __init__(self, credentials):
- self._credentials = credentials
+ def __init__(self, credentials):
+ self._credentials = credentials
######################## Multi-Callable Interfaces ###########################
class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
- """Affords invoking a unary-unary RPC."""
+ """Affords invoking a unary-unary RPC."""
- @abc.abstractmethod
- def __call__(self, request, timeout=None, metadata=None, credentials=None):
- """Synchronously invokes the underlying RPC.
+ @abc.abstractmethod
+ def __call__(self, request, timeout=None, metadata=None, credentials=None):
+ """Synchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
@@ -454,11 +451,11 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def with_call(self, request, timeout=None, metadata=None, credentials=None):
- """Synchronously invokes the underlying RPC.
+ @abc.abstractmethod
+ def with_call(self, request, timeout=None, metadata=None, credentials=None):
+ """Synchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
@@ -475,11 +472,11 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def future(self, request, timeout=None, metadata=None, credentials=None):
- """Asynchronously invokes the underlying RPC.
+ @abc.abstractmethod
+ def future(self, request, timeout=None, metadata=None, credentials=None):
+ """Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
@@ -490,19 +487,19 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
Returns:
An object that is both a Call for the RPC and a Future. In the event of
- RPC completion, the return Future's result value will be the response
- message of the RPC. Should the event terminate with non-OK status, the
- returned Future's exception value will be an RpcError.
+ RPC completion, the return Call-Future's result value will be the
+ response message of the RPC. Should the event terminate with non-OK
+ status, the returned Call-Future's exception value will be an RpcError.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
- """Affords invoking a unary-stream RPC."""
+ """Affords invoking a unary-stream RPC."""
- @abc.abstractmethod
- def __call__(self, request, timeout=None, metadata=None, credentials=None):
- """Invokes the underlying RPC.
+ @abc.abstractmethod
+ def __call__(self, request, timeout=None, metadata=None, credentials=None):
+ """Invokes the underlying RPC.
Args:
request: The request value for the RPC.
@@ -513,19 +510,22 @@ class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
Returns:
An object that is both a Call for the RPC and an iterator of response
- values. Drawing response values from the returned iterator may raise
- RpcError indicating termination of the RPC with non-OK status.
+ values. Drawing response values from the returned Call-iterator may
+ raise RpcError indicating termination of the RPC with non-OK status.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
- """Affords invoking a stream-unary RPC in any call style."""
+ """Affords invoking a stream-unary RPC in any call style."""
- @abc.abstractmethod
- def __call__(
- self, request_iterator, timeout=None, metadata=None, credentials=None):
- """Synchronously invokes the underlying RPC.
+ @abc.abstractmethod
+ def __call__(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None):
+ """Synchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
@@ -535,20 +535,22 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
credentials: An optional CallCredentials for the RPC.
Returns:
- The response value for the RPC, and a Call for the RPC if with_call was
- set to True at invocation.
+ The response value for the RPC.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def with_call(
- self, request_iterator, timeout=None, metadata=None, credentials=None):
- """Synchronously invokes the underlying RPC.
+ @abc.abstractmethod
+ def with_call(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None):
+ """Synchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
@@ -565,12 +567,15 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def future(
- self, request_iterator, timeout=None, metadata=None, credentials=None):
- """Asynchronously invokes the underlying RPC.
+ @abc.abstractmethod
+ def future(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None):
+ """Asynchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
@@ -581,20 +586,23 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
Returns:
An object that is both a Call for the RPC and a Future. In the event of
- RPC completion, the return Future's result value will be the response
- message of the RPC. Should the event terminate with non-OK status, the
- returned Future's exception value will be an RpcError.
+ RPC completion, the return Call-Future's result value will be the
+ response message of the RPC. Should the event terminate with non-OK
+ status, the returned Call-Future's exception value will be an RpcError.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
- """Affords invoking a stream-stream RPC in any call style."""
+ """Affords invoking a stream-stream RPC in any call style."""
- @abc.abstractmethod
- def __call__(
- self, request_iterator, timeout=None, metadata=None, credentials=None):
- """Invokes the underlying RPC.
+ @abc.abstractmethod
+ def __call__(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None):
+ """Invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
@@ -605,21 +613,21 @@ class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
Returns:
An object that is both a Call for the RPC and an iterator of response
- values. Drawing response values from the returned iterator may raise
- RpcError indicating termination of the RPC with non-OK status.
+ values. Drawing response values from the returned Call-iterator may
+ raise RpcError indicating termination of the RPC with non-OK status.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
############################# Channel Interface ##############################
class Channel(six.with_metaclass(abc.ABCMeta)):
- """Affords RPC invocation via generic methods."""
+ """Affords RPC invocation via generic methods."""
- @abc.abstractmethod
- def subscribe(self, callback, try_to_connect=False):
- """Subscribes to this Channel's connectivity.
+ @abc.abstractmethod
+ def subscribe(self, callback, try_to_connect=False):
+ """Subscribes to this Channel's connectivity.
Args:
callback: A callable to be invoked and passed a ChannelConnectivity value
@@ -631,22 +639,24 @@ class Channel(six.with_metaclass(abc.ABCMeta)):
attempt to connect if it is not already connected and ready to conduct
RPCs.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def unsubscribe(self, callback):
- """Unsubscribes a callback from this Channel's connectivity.
+ @abc.abstractmethod
+ def unsubscribe(self, callback):
+ """Unsubscribes a callback from this Channel's connectivity.
Args:
callback: A callable previously registered with this Channel from having
been passed to its "subscribe" method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def unary_unary(
- self, method, request_serializer=None, response_deserializer=None):
- """Creates a UnaryUnaryMultiCallable for a unary-unary method.
+ @abc.abstractmethod
+ def unary_unary(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ """Creates a UnaryUnaryMultiCallable for a unary-unary method.
Args:
method: The name of the RPC method.
@@ -658,12 +668,14 @@ class Channel(six.with_metaclass(abc.ABCMeta)):
Returns:
A UnaryUnaryMultiCallable value for the named unary-unary method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def unary_stream(
- self, method, request_serializer=None, response_deserializer=None):
- """Creates a UnaryStreamMultiCallable for a unary-stream method.
+ @abc.abstractmethod
+ def unary_stream(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ """Creates a UnaryStreamMultiCallable for a unary-stream method.
Args:
method: The name of the RPC method.
@@ -675,12 +687,14 @@ class Channel(six.with_metaclass(abc.ABCMeta)):
Returns:
A UnaryStreamMultiCallable value for the name unary-stream method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def stream_unary(
- self, method, request_serializer=None, response_deserializer=None):
- """Creates a StreamUnaryMultiCallable for a stream-unary method.
+ @abc.abstractmethod
+ def stream_unary(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ """Creates a StreamUnaryMultiCallable for a stream-unary method.
Args:
method: The name of the RPC method.
@@ -692,12 +706,14 @@ class Channel(six.with_metaclass(abc.ABCMeta)):
Returns:
A StreamUnaryMultiCallable value for the named stream-unary method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def stream_stream(
- self, method, request_serializer=None, response_deserializer=None):
- """Creates a StreamStreamMultiCallable for a stream-stream method.
+ @abc.abstractmethod
+ def stream_stream(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ """Creates a StreamStreamMultiCallable for a stream-stream method.
Args:
method: The name of the RPC method.
@@ -709,36 +725,36 @@ class Channel(six.with_metaclass(abc.ABCMeta)):
Returns:
A StreamStreamMultiCallable value for the named stream-stream method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
########################## Service-Side Context ##############################
class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
- """A context object passed to method implementations."""
+ """A context object passed to method implementations."""
- @abc.abstractmethod
- def invocation_metadata(self):
- """Accesses the metadata from the invocation-side of the RPC.
+ @abc.abstractmethod
+ def invocation_metadata(self):
+ """Accesses the metadata from the invocation-side of the RPC.
Returns:
The invocation :term:`metadata`.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def peer(self):
- """Identifies the peer that invoked the RPC being serviced.
+ @abc.abstractmethod
+ def peer(self):
+ """Identifies the peer that invoked the RPC being serviced.
Returns:
A string identifying the peer that invoked the RPC being serviced.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def send_initial_metadata(self, initial_metadata):
- """Sends the initial metadata value to the invocation-side of the RPC.
+ @abc.abstractmethod
+ def send_initial_metadata(self, initial_metadata):
+ """Sends the initial metadata value to the invocation-side of the RPC.
This method need not be called by method implementations if they have no
service-side initial metadata to transmit.
@@ -746,11 +762,11 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
Args:
initial_metadata: The initial :term:`metadata`.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def set_trailing_metadata(self, trailing_metadata):
- """Accepts the trailing metadata value of the RPC.
+ @abc.abstractmethod
+ def set_trailing_metadata(self, trailing_metadata):
+ """Accepts the trailing metadata value of the RPC.
This method need not be called by method implementations if they have no
service-side trailing metadata to transmit.
@@ -758,11 +774,11 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
Args:
trailing_metadata: The trailing :term:`metadata`.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def set_code(self, code):
- """Accepts the status code of the RPC.
+ @abc.abstractmethod
+ def set_code(self, code):
+ """Accepts the status code of the RPC.
This method need not be called by method implementations if they wish the
gRPC runtime to determine the status code of the RPC.
@@ -771,11 +787,11 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
code: A StatusCode value to be transmitted to the invocation side of the
RPC as the status code of the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def set_details(self, details):
- """Accepts the service-side details of the RPC.
+ @abc.abstractmethod
+ def set_details(self, details):
+ """Accepts the service-side details of the RPC.
This method need not be called by method implementations if they have no
details to transmit.
@@ -784,14 +800,14 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
details: A string to be transmitted to the invocation side of the RPC as
the status details of the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
##################### Service-Side Handler Interfaces ########################
class RpcMethodHandler(six.with_metaclass(abc.ABCMeta)):
- """An implementation of a single RPC method.
+ """An implementation of a single RPC method.
Attributes:
request_streaming: Whether the RPC supports exactly one request message or
@@ -826,7 +842,7 @@ class RpcMethodHandler(six.with_metaclass(abc.ABCMeta)):
class HandlerCallDetails(six.with_metaclass(abc.ABCMeta)):
- """Describes an RPC that has just arrived for service.
+ """Describes an RPC that has just arrived for service.
Attributes:
method: The method name of the RPC.
invocation_metadata: The :term:`metadata` from the invocation side of the RPC.
@@ -834,11 +850,11 @@ class HandlerCallDetails(six.with_metaclass(abc.ABCMeta)):
class GenericRpcHandler(six.with_metaclass(abc.ABCMeta)):
- """An implementation of arbitrarily many RPC methods."""
+ """An implementation of arbitrarily many RPC methods."""
- @abc.abstractmethod
- def service(self, handler_call_details):
- """Services an RPC (or not).
+ @abc.abstractmethod
+ def service(self, handler_call_details):
+ """Services an RPC (or not).
Args:
handler_call_details: A HandlerCallDetails describing the RPC.
@@ -847,11 +863,11 @@ class GenericRpcHandler(six.with_metaclass(abc.ABCMeta)):
An RpcMethodHandler with which the RPC may be serviced, or None to
indicate that this object will not be servicing the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class ServiceRpcHandler(six.with_metaclass(abc.ABCMeta, GenericRpcHandler)):
- """An implementation of RPC methods belonging to a service.
+ """An implementation of RPC methods belonging to a service.
A service handles RPC methods with structured names of the form
'/Service.Name/Service.MethodX', where 'Service.Name' is the value
@@ -860,25 +876,25 @@ class ServiceRpcHandler(six.with_metaclass(abc.ABCMeta, GenericRpcHandler)):
service name.
"""
- @abc.abstractmethod
- def service_name(self):
- """Returns this services name.
+ @abc.abstractmethod
+ def service_name(self):
+ """Returns this services name.
Returns:
The service name.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
############################# Server Interface ###############################
class Server(six.with_metaclass(abc.ABCMeta)):
- """Services RPCs."""
+ """Services RPCs."""
- @abc.abstractmethod
- def add_generic_rpc_handlers(self, generic_rpc_handlers):
- """Registers GenericRpcHandlers with this Server.
+ @abc.abstractmethod
+ def add_generic_rpc_handlers(self, generic_rpc_handlers):
+ """Registers GenericRpcHandlers with this Server.
This method is only safe to call before the server is started.
@@ -886,11 +902,11 @@ class Server(six.with_metaclass(abc.ABCMeta)):
generic_rpc_handlers: An iterable of GenericRpcHandlers that will be used
to service RPCs after this Server is started.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def add_insecure_port(self, address):
- """Reserves a port for insecure RPC service once this Server becomes active.
+ @abc.abstractmethod
+ def add_insecure_port(self, address):
+ """Reserves a port for insecure RPC service once this Server becomes active.
This method may only be called before this Server's start method is
called.
@@ -904,11 +920,11 @@ class Server(six.with_metaclass(abc.ABCMeta)):
in the passed address, but will likely be different if the port number
contained in the passed address was zero.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def add_secure_port(self, address, server_credentials):
- """Reserves a port for secure RPC service after this Server becomes active.
+ @abc.abstractmethod
+ def add_secure_port(self, address, server_credentials):
+ """Reserves a port for secure RPC service after this Server becomes active.
This method may only be called before this Server's start method is
called.
@@ -923,20 +939,20 @@ class Server(six.with_metaclass(abc.ABCMeta)):
in the passed address, but will likely be different if the port number
contained in the passed address was zero.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def start(self):
- """Starts this Server's service of RPCs.
+ @abc.abstractmethod
+ def start(self):
+ """Starts this Server's service of RPCs.
This method may only be called while the server is not serving RPCs (i.e. it
is not idempotent).
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def stop(self, grace):
- """Stops this Server's service of RPCs.
+ @abc.abstractmethod
+ def stop(self, grace):
+ """Stops this Server's service of RPCs.
All calls to this method immediately stop service of new RPCs. When existing
RPCs are aborted is controlled by the grace period parameter passed to this
@@ -967,15 +983,16 @@ class Server(six.with_metaclass(abc.ABCMeta)):
at the time it was stopped or if all RPCs that it had underway completed
very early in the grace period).
"""
- raise NotImplementedError()
+ raise NotImplementedError()
################################# Functions ################################
-def unary_unary_rpc_method_handler(
- behavior, request_deserializer=None, response_serializer=None):
- """Creates an RpcMethodHandler for a unary-unary RPC method.
+def unary_unary_rpc_method_handler(behavior,
+ request_deserializer=None,
+ response_serializer=None):
+ """Creates an RpcMethodHandler for a unary-unary RPC method.
Args:
behavior: The implementation of an RPC method as a callable behavior taking
@@ -987,15 +1004,16 @@ def unary_unary_rpc_method_handler(
An RpcMethodHandler for a unary-unary RPC method constructed from the given
parameters.
"""
- from grpc import _utilities
- return _utilities.RpcMethodHandler(
- False, False, request_deserializer, response_serializer, behavior, None,
- None, None)
+ from grpc import _utilities
+ return _utilities.RpcMethodHandler(False, False, request_deserializer,
+ response_serializer, behavior, None,
+ None, None)
-def unary_stream_rpc_method_handler(
- behavior, request_deserializer=None, response_serializer=None):
- """Creates an RpcMethodHandler for a unary-stream RPC method.
+def unary_stream_rpc_method_handler(behavior,
+ request_deserializer=None,
+ response_serializer=None):
+ """Creates an RpcMethodHandler for a unary-stream RPC method.
Args:
behavior: The implementation of an RPC method as a callable behavior taking
@@ -1007,15 +1025,16 @@ def unary_stream_rpc_method_handler(
An RpcMethodHandler for a unary-stream RPC method constructed from the
given parameters.
"""
- from grpc import _utilities
- return _utilities.RpcMethodHandler(
- False, True, request_deserializer, response_serializer, None, behavior,
- None, None)
+ from grpc import _utilities
+ return _utilities.RpcMethodHandler(False, True, request_deserializer,
+ response_serializer, None, behavior,
+ None, None)
-def stream_unary_rpc_method_handler(
- behavior, request_deserializer=None, response_serializer=None):
- """Creates an RpcMethodHandler for a stream-unary RPC method.
+def stream_unary_rpc_method_handler(behavior,
+ request_deserializer=None,
+ response_serializer=None):
+ """Creates an RpcMethodHandler for a stream-unary RPC method.
Args:
behavior: The implementation of an RPC method as a callable behavior taking
@@ -1027,15 +1046,16 @@ def stream_unary_rpc_method_handler(
An RpcMethodHandler for a stream-unary RPC method constructed from the
given parameters.
"""
- from grpc import _utilities
- return _utilities.RpcMethodHandler(
- True, False, request_deserializer, response_serializer, None, None,
- behavior, None)
+ from grpc import _utilities
+ return _utilities.RpcMethodHandler(True, False, request_deserializer,
+ response_serializer, None, None,
+ behavior, None)
-def stream_stream_rpc_method_handler(
- behavior, request_deserializer=None, response_serializer=None):
- """Creates an RpcMethodHandler for a stream-stream RPC method.
+def stream_stream_rpc_method_handler(behavior,
+ request_deserializer=None,
+ response_serializer=None):
+ """Creates an RpcMethodHandler for a stream-stream RPC method.
Args:
behavior: The implementation of an RPC method as a callable behavior taking
@@ -1048,14 +1068,14 @@ def stream_stream_rpc_method_handler(
An RpcMethodHandler for a stream-stream RPC method constructed from the
given parameters.
"""
- from grpc import _utilities
- return _utilities.RpcMethodHandler(
- True, True, request_deserializer, response_serializer, None, None, None,
- behavior)
+ from grpc import _utilities
+ return _utilities.RpcMethodHandler(True, True, request_deserializer,
+ response_serializer, None, None, None,
+ behavior)
def method_handlers_generic_handler(service, method_handlers):
- """Creates a grpc.GenericRpcHandler from RpcMethodHandlers.
+ """Creates a grpc.GenericRpcHandler from RpcMethodHandlers.
Args:
service: A service name to be used for the given method handlers.
@@ -1065,13 +1085,14 @@ def method_handlers_generic_handler(service, method_handlers):
Returns:
A GenericRpcHandler constructed from the given parameters.
"""
- from grpc import _utilities
- return _utilities.DictionaryGenericHandler(service, method_handlers)
+ from grpc import _utilities
+ return _utilities.DictionaryGenericHandler(service, method_handlers)
-def ssl_channel_credentials(
- root_certificates=None, private_key=None, certificate_chain=None):
- """Creates a ChannelCredentials for use with an SSL-enabled Channel.
+def ssl_channel_credentials(root_certificates=None,
+ private_key=None,
+ certificate_chain=None):
+ """Creates a ChannelCredentials for use with an SSL-enabled Channel.
Args:
root_certificates: The PEM-encoded root certificates or unset to ask for
@@ -1084,16 +1105,16 @@ def ssl_channel_credentials(
Returns:
A ChannelCredentials for use with an SSL-enabled Channel.
"""
- if private_key is not None or certificate_chain is not None:
- pair = _cygrpc.SslPemKeyCertPair(private_key, certificate_chain)
- else:
- pair = None
- return ChannelCredentials(
- _cygrpc.channel_credentials_ssl(root_certificates, pair))
+ if private_key is not None or certificate_chain is not None:
+ pair = _cygrpc.SslPemKeyCertPair(private_key, certificate_chain)
+ else:
+ pair = None
+ return ChannelCredentials(
+ _cygrpc.channel_credentials_ssl(root_certificates, pair))
def metadata_call_credentials(metadata_plugin, name=None):
- """Construct CallCredentials from an AuthMetadataPlugin.
+ """Construct CallCredentials from an AuthMetadataPlugin.
Args:
metadata_plugin: An AuthMetadataPlugin to use as the authentication behavior
@@ -1103,21 +1124,21 @@ def metadata_call_credentials(metadata_plugin, name=None):
Returns:
A CallCredentials.
"""
- from grpc import _plugin_wrapping
- if name is None:
- try:
- effective_name = metadata_plugin.__name__
- except AttributeError:
- effective_name = metadata_plugin.__class__.__name__
- else:
- effective_name = name
- return CallCredentials(
- _plugin_wrapping.call_credentials_metadata_plugin(
- metadata_plugin, effective_name))
+ from grpc import _plugin_wrapping
+ if name is None:
+ try:
+ effective_name = metadata_plugin.__name__
+ except AttributeError:
+ effective_name = metadata_plugin.__class__.__name__
+ else:
+ effective_name = name
+ return CallCredentials(
+ _plugin_wrapping.call_credentials_metadata_plugin(metadata_plugin,
+ effective_name))
def access_token_call_credentials(access_token):
- """Construct CallCredentials from an access token.
+ """Construct CallCredentials from an access token.
Args:
access_token: A string to place directly in the http request
@@ -1126,13 +1147,13 @@ def access_token_call_credentials(access_token):
Returns:
A CallCredentials.
"""
- from grpc import _auth
- return metadata_call_credentials(
- _auth.AccessTokenCallCredentials(access_token))
+ from grpc import _auth
+ return metadata_call_credentials(
+ _auth.AccessTokenCallCredentials(access_token))
def composite_call_credentials(*call_credentials):
- """Compose multiple CallCredentials to make a new CallCredentials.
+ """Compose multiple CallCredentials to make a new CallCredentials.
Args:
*call_credentials: At least two CallCredentials objects.
@@ -1140,16 +1161,16 @@ def composite_call_credentials(*call_credentials):
Returns:
A CallCredentials object composed of the given CallCredentials objects.
"""
- from grpc import _credential_composition
- cygrpc_call_credentials = tuple(
- single_call_credentials._credentials
- for single_call_credentials in call_credentials)
- return CallCredentials(
- _credential_composition.call(cygrpc_call_credentials))
+ from grpc import _credential_composition
+ cygrpc_call_credentials = tuple(
+ single_call_credentials._credentials
+ for single_call_credentials in call_credentials)
+ return CallCredentials(
+ _credential_composition.call(cygrpc_call_credentials))
def composite_channel_credentials(channel_credentials, *call_credentials):
- """Compose a ChannelCredentials and one or more CallCredentials objects.
+ """Compose a ChannelCredentials and one or more CallCredentials objects.
Args:
channel_credentials: A ChannelCredentials.
@@ -1159,19 +1180,19 @@ def composite_channel_credentials(channel_credentials, *call_credentials):
A ChannelCredentials composed of the given ChannelCredentials and
CallCredentials objects.
"""
- from grpc import _credential_composition
- cygrpc_call_credentials = tuple(
- single_call_credentials._credentials
- for single_call_credentials in call_credentials)
- return ChannelCredentials(
- _credential_composition.channel(
- channel_credentials._credentials, cygrpc_call_credentials))
+ from grpc import _credential_composition
+ cygrpc_call_credentials = tuple(
+ single_call_credentials._credentials
+ for single_call_credentials in call_credentials)
+ return ChannelCredentials(
+ _credential_composition.channel(channel_credentials._credentials,
+ cygrpc_call_credentials))
-def ssl_server_credentials(
- private_key_certificate_chain_pairs, root_certificates=None,
- require_client_auth=False):
- """Creates a ServerCredentials for use with an SSL-enabled Server.
+def ssl_server_credentials(private_key_certificate_chain_pairs,
+ root_certificates=None,
+ require_client_auth=False):
+ """Creates a ServerCredentials for use with an SSL-enabled Server.
Args:
private_key_certificate_chain_pairs: A nonempty sequence each element of
@@ -1187,23 +1208,23 @@ def ssl_server_credentials(
Returns:
A ServerCredentials for use with an SSL-enabled Server.
"""
- if len(private_key_certificate_chain_pairs) == 0:
- raise ValueError(
- 'At least one private key-certificate chain pair is required!')
- elif require_client_auth and root_certificates is None:
- raise ValueError(
- 'Illegal to require client auth without providing root certificates!')
- else:
- return ServerCredentials(
- _cygrpc.server_credentials_ssl(
- root_certificates,
- [_cygrpc.SslPemKeyCertPair(key, pem)
- for key, pem in private_key_certificate_chain_pairs],
- require_client_auth))
+ if len(private_key_certificate_chain_pairs) == 0:
+ raise ValueError(
+ 'At least one private key-certificate chain pair is required!')
+ elif require_client_auth and root_certificates is None:
+ raise ValueError(
+ 'Illegal to require client auth without providing root certificates!'
+ )
+ else:
+ return ServerCredentials(
+ _cygrpc.server_credentials_ssl(root_certificates, [
+ _cygrpc.SslPemKeyCertPair(key, pem)
+ for key, pem in private_key_certificate_chain_pairs
+ ], require_client_auth))
def channel_ready_future(channel):
- """Creates a Future tracking when a Channel is ready.
+ """Creates a Future tracking when a Channel is ready.
Cancelling the returned Future does not tell the given Channel to abandon
attempts it may have been making to connect; cancelling merely deactivates the
@@ -1216,12 +1237,12 @@ def channel_ready_future(channel):
A Future that matures when the given Channel has connectivity
ChannelConnectivity.READY.
"""
- from grpc import _utilities
- return _utilities.channel_ready_future(channel)
+ from grpc import _utilities
+ return _utilities.channel_ready_future(channel)
def insecure_channel(target, options=None):
- """Creates an insecure Channel to a server.
+ """Creates an insecure Channel to a server.
Args:
target: The target to which to connect.
@@ -1231,12 +1252,12 @@ def insecure_channel(target, options=None):
Returns:
A Channel to the target through which RPCs may be conducted.
"""
- from grpc import _channel
- return _channel.Channel(target, () if options is None else options, None)
+ from grpc import _channel
+ return _channel.Channel(target, () if options is None else options, None)
def secure_channel(target, credentials, options=None):
- """Creates a secure Channel to a server.
+ """Creates a secure Channel to a server.
Args:
target: The target to which to connect.
@@ -1247,13 +1268,13 @@ def secure_channel(target, credentials, options=None):
Returns:
A Channel to the target through which RPCs may be conducted.
"""
- from grpc import _channel
- return _channel.Channel(target, () if options is None else options,
- credentials._credentials)
+ from grpc import _channel
+ return _channel.Channel(target, () if options is None else options,
+ credentials._credentials)
def server(thread_pool, handlers=None, options=None):
- """Creates a Server with which RPCs can be serviced.
+ """Creates a Server with which RPCs can be serviced.
Args:
thread_pool: A futures.ThreadPoolExecutor to be used by the returned Server
@@ -1269,14 +1290,13 @@ def server(thread_pool, handlers=None, options=None):
Returns:
A Server with which RPCs can be serviced.
"""
- from grpc import _server
- return _server.Server(thread_pool, () if handlers is None else handlers,
- () if options is None else options)
+ from grpc import _server
+ return _server.Server(thread_pool, () if handlers is None else handlers, ()
+ if options is None else options)
################################### __all__ #################################
-
__all__ = (
'FutureTimeoutError',
'FutureCancelledError',
@@ -1317,26 +1337,23 @@ __all__ = (
'channel_ready_future',
'insecure_channel',
'secure_channel',
- 'server',
-)
-
+ 'server',)
############################### Extension Shims ################################
-
# Here to maintain backwards compatibility; avoid using these in new code!
try:
- import grpc_tools
- sys.modules.update({'grpc.tools': grpc_tools})
+ import grpc_tools
+ sys.modules.update({'grpc.tools': grpc_tools})
except ImportError:
- pass
+ pass
try:
- import grpc_health
- sys.modules.update({'grpc.health': grpc_health})
+ import grpc_health
+ sys.modules.update({'grpc.health': grpc_health})
except ImportError:
- pass
+ pass
try:
- import grpc_reflection
- sys.modules.update({'grpc.reflection': grpc_reflection})
+ import grpc_reflection
+ sys.modules.update({'grpc.reflection': grpc_reflection})
except ImportError:
- pass
+ pass
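
[Editor's note, not part of the patch] The __init__.py hunks above reformat the public helpers for standing up a server and opening a channel (unary_unary_rpc_method_handler, method_handlers_generic_handler, server, add_insecure_port, start/stop, insecure_channel, channel_ready_future). A minimal sketch of how those helpers compose; the 'demo.Echo' service name, the 'Say' method, and the echo behavior are illustrative assumptions only:

    from concurrent import futures

    import grpc

    def _say(request, servicer_context):
        # Unary-unary behavior: return the request value unchanged.
        return request

    # Map method names to RpcMethodHandlers and wrap them in a GenericRpcHandler.
    handler = grpc.method_handlers_generic_handler(
        'demo.Echo', {'Say': grpc.unary_unary_rpc_method_handler(_say)})

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4),
                         handlers=(handler,))
    port = server.add_insecure_port('[::]:0')  # port 0: let the runtime choose
    server.start()

    channel = grpc.insecure_channel('localhost:{}'.format(port))
    grpc.channel_ready_future(channel).result(timeout=5)
    # ... issue RPCs over `channel`, then shut the server down:
    server.stop(None)
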
diff --git a/src/python/grpcio/grpc/_auth.py b/src/python/grpcio/grpc/_auth.py
index dea3221c9d..e8a90cf504 100644
--- a/src/python/grpcio/grpc/_auth.py
+++ b/src/python/grpcio/grpc/_auth.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""GRPCAuthMetadataPlugins for standard authentication."""
import inspect
@@ -36,51 +35,53 @@ import grpc
def _sign_request(callback, token, error):
- metadata = (('authorization', 'Bearer {}'.format(token)),)
- callback(metadata, error)
+ metadata = (('authorization', 'Bearer {}'.format(token)),)
+ callback(metadata, error)
class GoogleCallCredentials(grpc.AuthMetadataPlugin):
- """Metadata wrapper for GoogleCredentials from the oauth2client library."""
-
- def __init__(self, credentials):
- self._credentials = credentials
- self._pool = futures.ThreadPoolExecutor(max_workers=1)
-
- # Hack to determine if these are JWT creds and we need to pass
- # additional_claims when getting a token
- if 'additional_claims' in inspect.getargspec(
- credentials.get_access_token).args:
- self._is_jwt = True
- else:
- self._is_jwt = False
-
- def __call__(self, context, callback):
- # MetadataPlugins cannot block (see grpc.beta.interfaces.py)
- if self._is_jwt:
- future = self._pool.submit(self._credentials.get_access_token,
- additional_claims={'aud': context.service_url})
- else:
- future = self._pool.submit(self._credentials.get_access_token)
- future.add_done_callback(lambda x: self._get_token_callback(callback, x))
-
- def _get_token_callback(self, callback, future):
- try:
- access_token = future.result().access_token
- except Exception as e:
- _sign_request(callback, None, e)
- else:
- _sign_request(callback, access_token, None)
-
- def __del__(self):
- self._pool.shutdown(wait=False)
+ """Metadata wrapper for GoogleCredentials from the oauth2client library."""
+
+ def __init__(self, credentials):
+ self._credentials = credentials
+ self._pool = futures.ThreadPoolExecutor(max_workers=1)
+
+ # Hack to determine if these are JWT creds and we need to pass
+ # additional_claims when getting a token
+ if 'additional_claims' in inspect.getargspec(
+ credentials.get_access_token).args:
+ self._is_jwt = True
+ else:
+ self._is_jwt = False
+
+ def __call__(self, context, callback):
+ # MetadataPlugins cannot block (see grpc.beta.interfaces.py)
+ if self._is_jwt:
+ future = self._pool.submit(
+ self._credentials.get_access_token,
+ additional_claims={'aud': context.service_url})
+ else:
+ future = self._pool.submit(self._credentials.get_access_token)
+ future.add_done_callback(
+ lambda x: self._get_token_callback(callback, x))
+
+ def _get_token_callback(self, callback, future):
+ try:
+ access_token = future.result().access_token
+ except Exception as e:
+ _sign_request(callback, None, e)
+ else:
+ _sign_request(callback, access_token, None)
+
+ def __del__(self):
+ self._pool.shutdown(wait=False)
class AccessTokenCallCredentials(grpc.AuthMetadataPlugin):
- """Metadata wrapper for raw access token credentials."""
+ """Metadata wrapper for raw access token credentials."""
- def __init__(self, access_token):
- self._access_token = access_token
+ def __init__(self, access_token):
+ self._access_token = access_token
- def __call__(self, context, callback):
- _sign_request(callback, self._access_token, None)
+ def __call__(self, context, callback):
+ _sign_request(callback, self._access_token, None)
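
[Editor's note, not part of the patch] The _auth.py plugins reformatted above plug into the credential helpers from __init__.py. A short sketch of that wiring; the token string, the custom header, and the target address are placeholders:

    import grpc

    # AccessTokenCallCredentials signs each request with
    # 'authorization: Bearer <token>' via _sign_request:
    token_creds = grpc.access_token_call_credentials('my-oauth2-token')

    # Any AuthMetadataPlugin can be wrapped the same way:
    class StaticHeaderPlugin(grpc.AuthMetadataPlugin):
        def __call__(self, context, callback):
            # Plugins must not block; hand the metadata straight to the callback.
            callback((('x-demo-header', 'demo-value'),), None)

    header_creds = grpc.metadata_call_credentials(
        StaticHeaderPlugin(), name='static-header')

    # Per-call credentials ride on top of the channel's SSL credentials:
    channel_creds = grpc.composite_channel_credentials(
        grpc.ssl_channel_credentials(), token_creds, header_creds)
    channel = grpc.secure_channel('example.com:443', channel_creds)
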
diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py
index e8c6a99cb1..77412236cc 100644
--- a/src/python/grpcio/grpc/_channel.py
+++ b/src/python/grpcio/grpc/_channel.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Invocation-side implementation of gRPC Python."""
import sys
@@ -52,692 +51,710 @@ _UNARY_UNARY_INITIAL_DUE = (
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
- cygrpc.OperationType.receive_status_on_client,
-)
+ cygrpc.OperationType.receive_status_on_client,)
_UNARY_STREAM_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
- cygrpc.OperationType.receive_status_on_client,
-)
+ cygrpc.OperationType.receive_status_on_client,)
_STREAM_UNARY_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
- cygrpc.OperationType.receive_status_on_client,
-)
+ cygrpc.OperationType.receive_status_on_client,)
_STREAM_STREAM_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
- cygrpc.OperationType.receive_status_on_client,
-)
+ cygrpc.OperationType.receive_status_on_client,)
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
'Exception calling channel subscription callback!')
def _deadline(timeout):
- if timeout is None:
- return None, _INFINITE_FUTURE
- else:
- deadline = time.time() + timeout
- return deadline, cygrpc.Timespec(deadline)
+ if timeout is None:
+ return None, _INFINITE_FUTURE
+ else:
+ deadline = time.time() + timeout
+ return deadline, cygrpc.Timespec(deadline)
def _unknown_code_details(unknown_cygrpc_code, details):
- return 'Server sent unknown code {} and details "{}"'.format(
- unknown_cygrpc_code, details)
+ return 'Server sent unknown code {} and details "{}"'.format(
+ unknown_cygrpc_code, details)
def _wait_once_until(condition, until):
- if until is None:
- condition.wait()
- else:
- remaining = until - time.time()
- if remaining < 0:
- raise grpc.FutureTimeoutError()
+ if until is None:
+ condition.wait()
else:
- condition.wait(timeout=remaining)
+ remaining = until - time.time()
+ if remaining < 0:
+ raise grpc.FutureTimeoutError()
+ else:
+ condition.wait(timeout=remaining)
+
_INTERNAL_CALL_ERROR_MESSAGE_FORMAT = (
'Internal gRPC call error %d. ' +
'Please report to https://github.com/grpc/grpc/issues')
+
def _check_call_error(call_error, metadata):
- if call_error == cygrpc.CallError.invalid_metadata:
- raise ValueError('metadata was invalid: %s' % metadata)
- elif call_error != cygrpc.CallError.ok:
- raise ValueError(_INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error)
+ if call_error == cygrpc.CallError.invalid_metadata:
+ raise ValueError('metadata was invalid: %s' % metadata)
+ elif call_error != cygrpc.CallError.ok:
+ raise ValueError(_INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error)
+
def _call_error_set_RPCstate(state, call_error, metadata):
- if call_error == cygrpc.CallError.invalid_metadata:
- _abort(state, grpc.StatusCode.INTERNAL, 'metadata was invalid: %s' % metadata)
- else:
- _abort(state, grpc.StatusCode.INTERNAL,
- _INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error)
+ if call_error == cygrpc.CallError.invalid_metadata:
+ _abort(state, grpc.StatusCode.INTERNAL,
+ 'metadata was invalid: %s' % metadata)
+ else:
+ _abort(state, grpc.StatusCode.INTERNAL,
+ _INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error)
+
class _RPCState(object):
- def __init__(self, due, initial_metadata, trailing_metadata, code, details):
- self.condition = threading.Condition()
- # The cygrpc.OperationType objects representing events due from the RPC's
- # completion queue.
- self.due = set(due)
- self.initial_metadata = initial_metadata
- self.response = None
- self.trailing_metadata = trailing_metadata
- self.code = code
- self.details = details
- # The semantics of grpc.Future.cancel and grpc.Future.cancelled are
- # slightly wonky, so they have to be tracked separately from the rest of the
- # result of the RPC. This field tracks whether cancellation was requested
- # prior to termination of the RPC.
- self.cancelled = False
- self.callbacks = []
+ def __init__(self, due, initial_metadata, trailing_metadata, code, details):
+ self.condition = threading.Condition()
+ # The cygrpc.OperationType objects representing events due from the RPC's
+ # completion queue.
+ self.due = set(due)
+ self.initial_metadata = initial_metadata
+ self.response = None
+ self.trailing_metadata = trailing_metadata
+ self.code = code
+ self.details = details
+ # The semantics of grpc.Future.cancel and grpc.Future.cancelled are
+ # slightly wonky, so they have to be tracked separately from the rest of the
+ # result of the RPC. This field tracks whether cancellation was requested
+ # prior to termination of the RPC.
+ self.cancelled = False
+ self.callbacks = []
def _abort(state, code, details):
- if state.code is None:
- state.code = code
- state.details = details
- if state.initial_metadata is None:
- state.initial_metadata = _EMPTY_METADATA
- state.trailing_metadata = _EMPTY_METADATA
+ if state.code is None:
+ state.code = code
+ state.details = details
+ if state.initial_metadata is None:
+ state.initial_metadata = _EMPTY_METADATA
+ state.trailing_metadata = _EMPTY_METADATA
def _handle_event(event, state, response_deserializer):
- callbacks = []
- for batch_operation in event.batch_operations:
- operation_type = batch_operation.type
- state.due.remove(operation_type)
- if operation_type == cygrpc.OperationType.receive_initial_metadata:
- state.initial_metadata = batch_operation.received_metadata
- elif operation_type == cygrpc.OperationType.receive_message:
- serialized_response = batch_operation.received_message.bytes()
- if serialized_response is not None:
- response = _common.deserialize(
- serialized_response, response_deserializer)
- if response is None:
- details = 'Exception deserializing response!'
- _abort(state, grpc.StatusCode.INTERNAL, details)
- else:
- state.response = response
- elif operation_type == cygrpc.OperationType.receive_status_on_client:
- state.trailing_metadata = batch_operation.received_metadata
- if state.code is None:
- code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
- batch_operation.received_status_code)
- if code is None:
- state.code = grpc.StatusCode.UNKNOWN
- state.details = _unknown_code_details(
- batch_operation.received_status_code,
- batch_operation.received_status_details)
- else:
- state.code = code
- state.details = batch_operation.received_status_details
- callbacks.extend(state.callbacks)
- state.callbacks = None
- return callbacks
+ callbacks = []
+ for batch_operation in event.batch_operations:
+ operation_type = batch_operation.type
+ state.due.remove(operation_type)
+ if operation_type == cygrpc.OperationType.receive_initial_metadata:
+ state.initial_metadata = batch_operation.received_metadata
+ elif operation_type == cygrpc.OperationType.receive_message:
+ serialized_response = batch_operation.received_message.bytes()
+ if serialized_response is not None:
+ response = _common.deserialize(serialized_response,
+ response_deserializer)
+ if response is None:
+ details = 'Exception deserializing response!'
+ _abort(state, grpc.StatusCode.INTERNAL, details)
+ else:
+ state.response = response
+ elif operation_type == cygrpc.OperationType.receive_status_on_client:
+ state.trailing_metadata = batch_operation.received_metadata
+ if state.code is None:
+ code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
+ batch_operation.received_status_code)
+ if code is None:
+ state.code = grpc.StatusCode.UNKNOWN
+ state.details = _unknown_code_details(
+ batch_operation.received_status_code,
+ batch_operation.received_status_details)
+ else:
+ state.code = code
+ state.details = batch_operation.received_status_details
+ callbacks.extend(state.callbacks)
+ state.callbacks = None
+ return callbacks
def _event_handler(state, call, response_deserializer):
- def handle_event(event):
- with state.condition:
- callbacks = _handle_event(event, state, response_deserializer)
- state.condition.notify_all()
- done = not state.due
- for callback in callbacks:
- callback()
- return call if done else None
- return handle_event
-
-
-def _consume_request_iterator(
- request_iterator, state, call, request_serializer):
- event_handler = _event_handler(state, call, None)
-
- def consume_request_iterator():
- while True:
- try:
- request = next(request_iterator)
- except StopIteration:
- break
- except Exception as e:
- logging.exception("Exception iterating requests!")
- call.cancel()
- _abort(state, grpc.StatusCode.UNKNOWN, "Exception iterating requests!")
- return
- serialized_request = _common.serialize(request, request_serializer)
- with state.condition:
- if state.code is None and not state.cancelled:
- if serialized_request is None:
- call.cancel()
- details = 'Exception serializing request!'
- _abort(state, grpc.StatusCode.INTERNAL, details)
- return
- else:
- operations = (
- cygrpc.operation_send_message(
- serialized_request, _EMPTY_FLAGS),
- )
- call.start_client_batch(cygrpc.Operations(operations),
- event_handler)
- state.due.add(cygrpc.OperationType.send_message)
- while True:
- state.condition.wait()
- if state.code is None:
- if cygrpc.OperationType.send_message not in state.due:
- break
- else:
+
+ def handle_event(event):
+ with state.condition:
+ callbacks = _handle_event(event, state, response_deserializer)
+ state.condition.notify_all()
+ done = not state.due
+ for callback in callbacks:
+ callback()
+ return call if done else None
+
+ return handle_event
+
+
+def _consume_request_iterator(request_iterator, state, call,
+ request_serializer):
+ event_handler = _event_handler(state, call, None)
+
+ def consume_request_iterator():
+ while True:
+ try:
+ request = next(request_iterator)
+ except StopIteration:
+ break
+ except Exception as e:
+ logging.exception("Exception iterating requests!")
+ call.cancel()
+ _abort(state, grpc.StatusCode.UNKNOWN,
+ "Exception iterating requests!")
return
- else:
- return
- with state.condition:
- if state.code is None:
- operations = (
- cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
- )
- call.start_client_batch(cygrpc.Operations(operations), event_handler)
- state.due.add(cygrpc.OperationType.send_close_from_client)
-
- def stop_consumption_thread(timeout):
- with state.condition:
- if state.code is None:
- call.cancel()
- state.cancelled = True
- _abort(state, grpc.StatusCode.CANCELLED, 'Cancelled!')
- state.condition.notify_all()
-
- consumption_thread = _common.CleanupThread(
- stop_consumption_thread, target=consume_request_iterator)
- consumption_thread.start()
+ serialized_request = _common.serialize(request, request_serializer)
+ with state.condition:
+ if state.code is None and not state.cancelled:
+ if serialized_request is None:
+ call.cancel()
+ details = 'Exception serializing request!'
+ _abort(state, grpc.StatusCode.INTERNAL, details)
+ return
+ else:
+ operations = (cygrpc.operation_send_message(
+ serialized_request, _EMPTY_FLAGS),)
+ call.start_client_batch(
+ cygrpc.Operations(operations), event_handler)
+ state.due.add(cygrpc.OperationType.send_message)
+ while True:
+ state.condition.wait()
+ if state.code is None:
+ if cygrpc.OperationType.send_message not in state.due:
+ break
+ else:
+ return
+ else:
+ return
+ with state.condition:
+ if state.code is None:
+ operations = (
+ cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),)
+ call.start_client_batch(
+ cygrpc.Operations(operations), event_handler)
+ state.due.add(cygrpc.OperationType.send_close_from_client)
+
+ def stop_consumption_thread(timeout):
+ with state.condition:
+ if state.code is None:
+ call.cancel()
+ state.cancelled = True
+ _abort(state, grpc.StatusCode.CANCELLED, 'Cancelled!')
+ state.condition.notify_all()
+
+ consumption_thread = _common.CleanupThread(
+ stop_consumption_thread, target=consume_request_iterator)
+ consumption_thread.start()
class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
- def __init__(self, state, call, response_deserializer, deadline):
- super(_Rendezvous, self).__init__()
- self._state = state
- self._call = call
- self._response_deserializer = response_deserializer
- self._deadline = deadline
-
- def cancel(self):
- with self._state.condition:
- if self._state.code is None:
- self._call.cancel()
- self._state.cancelled = True
- _abort(self._state, grpc.StatusCode.CANCELLED, 'Cancelled!')
- self._state.condition.notify_all()
- return False
-
- def cancelled(self):
- with self._state.condition:
- return self._state.cancelled
-
- def running(self):
- with self._state.condition:
- return self._state.code is None
-
- def done(self):
- with self._state.condition:
- return self._state.code is not None
-
- def result(self, timeout=None):
- until = None if timeout is None else time.time() + timeout
- with self._state.condition:
- while True:
- if self._state.code is None:
- _wait_once_until(self._state.condition, until)
- elif self._state.code is grpc.StatusCode.OK:
- return self._state.response
- elif self._state.cancelled:
- raise grpc.FutureCancelledError()
- else:
- raise self
-
- def exception(self, timeout=None):
- until = None if timeout is None else time.time() + timeout
- with self._state.condition:
- while True:
- if self._state.code is None:
- _wait_once_until(self._state.condition, until)
- elif self._state.code is grpc.StatusCode.OK:
- return None
- elif self._state.cancelled:
- raise grpc.FutureCancelledError()
- else:
- return self
-
- def traceback(self, timeout=None):
- until = None if timeout is None else time.time() + timeout
- with self._state.condition:
- while True:
- if self._state.code is None:
- _wait_once_until(self._state.condition, until)
- elif self._state.code is grpc.StatusCode.OK:
- return None
- elif self._state.cancelled:
- raise grpc.FutureCancelledError()
+ def __init__(self, state, call, response_deserializer, deadline):
+ super(_Rendezvous, self).__init__()
+ self._state = state
+ self._call = call
+ self._response_deserializer = response_deserializer
+ self._deadline = deadline
+
+ def cancel(self):
+ with self._state.condition:
+ if self._state.code is None:
+ self._call.cancel()
+ self._state.cancelled = True
+ _abort(self._state, grpc.StatusCode.CANCELLED, 'Cancelled!')
+ self._state.condition.notify_all()
+ return False
+
+ def cancelled(self):
+ with self._state.condition:
+ return self._state.cancelled
+
+ def running(self):
+ with self._state.condition:
+ return self._state.code is None
+
+ def done(self):
+ with self._state.condition:
+ return self._state.code is not None
+
+ def result(self, timeout=None):
+ until = None if timeout is None else time.time() + timeout
+ with self._state.condition:
+ while True:
+ if self._state.code is None:
+ _wait_once_until(self._state.condition, until)
+ elif self._state.code is grpc.StatusCode.OK:
+ return self._state.response
+ elif self._state.cancelled:
+ raise grpc.FutureCancelledError()
+ else:
+ raise self
+
+ def exception(self, timeout=None):
+ until = None if timeout is None else time.time() + timeout
+ with self._state.condition:
+ while True:
+ if self._state.code is None:
+ _wait_once_until(self._state.condition, until)
+ elif self._state.code is grpc.StatusCode.OK:
+ return None
+ elif self._state.cancelled:
+ raise grpc.FutureCancelledError()
+ else:
+ return self
+
+ def traceback(self, timeout=None):
+ until = None if timeout is None else time.time() + timeout
+ with self._state.condition:
+ while True:
+ if self._state.code is None:
+ _wait_once_until(self._state.condition, until)
+ elif self._state.code is grpc.StatusCode.OK:
+ return None
+ elif self._state.cancelled:
+ raise grpc.FutureCancelledError()
+ else:
+ try:
+ raise self
+ except grpc.RpcError:
+ return sys.exc_info()[2]
+
+ def add_done_callback(self, fn):
+ with self._state.condition:
+ if self._state.code is None:
+ self._state.callbacks.append(lambda: fn(self))
+ return
+
+ fn(self)
+
+ def _next(self):
+ with self._state.condition:
+ if self._state.code is None:
+ event_handler = _event_handler(self._state, self._call,
+ self._response_deserializer)
+ self._call.start_client_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
+ event_handler)
+ self._state.due.add(cygrpc.OperationType.receive_message)
+ elif self._state.code is grpc.StatusCode.OK:
+ raise StopIteration()
+ else:
+ raise self
+ while True:
+ self._state.condition.wait()
+ if self._state.response is not None:
+ response = self._state.response
+ self._state.response = None
+ return response
+ elif cygrpc.OperationType.receive_message not in self._state.due:
+ if self._state.code is grpc.StatusCode.OK:
+ raise StopIteration()
+ elif self._state.code is not None:
+ raise self
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._next()
+
+ def next(self):
+ return self._next()
+
+ def is_active(self):
+ with self._state.condition:
+ return self._state.code is None
+
+ def time_remaining(self):
+ if self._deadline is None:
+ return None
else:
- try:
- raise self
- except grpc.RpcError:
- return sys.exc_info()[2]
-
- def add_done_callback(self, fn):
- with self._state.condition:
- if self._state.code is None:
- self._state.callbacks.append(lambda: fn(self))
- return
-
- fn(self)
-
- def _next(self):
- with self._state.condition:
- if self._state.code is None:
- event_handler = _event_handler(
- self._state, self._call, self._response_deserializer)
- self._call.start_client_batch(
- cygrpc.Operations(
- (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
- event_handler)
- self._state.due.add(cygrpc.OperationType.receive_message)
- elif self._state.code is grpc.StatusCode.OK:
- raise StopIteration()
- else:
- raise self
- while True:
- self._state.condition.wait()
- if self._state.response is not None:
- response = self._state.response
- self._state.response = None
- return response
- elif cygrpc.OperationType.receive_message not in self._state.due:
- if self._state.code is grpc.StatusCode.OK:
- raise StopIteration()
- elif self._state.code is not None:
- raise self
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return self._next()
-
- def next(self):
- return self._next()
-
- def is_active(self):
- with self._state.condition:
- return self._state.code is None
-
- def time_remaining(self):
- if self._deadline is None:
- return None
- else:
- return max(self._deadline - time.time(), 0)
-
- def add_callback(self, callback):
- with self._state.condition:
- if self._state.callbacks is None:
- return False
- else:
- self._state.callbacks.append(callback)
- return True
-
- def initial_metadata(self):
- with self._state.condition:
- while self._state.initial_metadata is None:
- self._state.condition.wait()
- return _common.application_metadata(self._state.initial_metadata)
-
- def trailing_metadata(self):
- with self._state.condition:
- while self._state.trailing_metadata is None:
- self._state.condition.wait()
- return _common.application_metadata(self._state.trailing_metadata)
-
- def code(self):
- with self._state.condition:
- while self._state.code is None:
- self._state.condition.wait()
- return self._state.code
-
- def details(self):
- with self._state.condition:
- while self._state.details is None:
- self._state.condition.wait()
- return _common.decode(self._state.details)
-
- def _repr(self):
- with self._state.condition:
- if self._state.code is None:
- return '<_Rendezvous object of in-flight RPC>'
- else:
- return '<_Rendezvous of RPC that terminated with ({}, {})>'.format(
- self._state.code, _common.decode(self._state.details))
-
- def __repr__(self):
- return self._repr()
-
- def __str__(self):
- return self._repr()
-
- def __del__(self):
- with self._state.condition:
- if self._state.code is None:
- self._call.cancel()
- self._state.cancelled = True
- self._state.code = grpc.StatusCode.CANCELLED
- self._state.condition.notify_all()
+ return max(self._deadline - time.time(), 0)
+
+ def add_callback(self, callback):
+ with self._state.condition:
+ if self._state.callbacks is None:
+ return False
+ else:
+ self._state.callbacks.append(callback)
+ return True
+
+ def initial_metadata(self):
+ with self._state.condition:
+ while self._state.initial_metadata is None:
+ self._state.condition.wait()
+ return _common.application_metadata(self._state.initial_metadata)
+
+ def trailing_metadata(self):
+ with self._state.condition:
+ while self._state.trailing_metadata is None:
+ self._state.condition.wait()
+ return _common.application_metadata(self._state.trailing_metadata)
+
+ def code(self):
+ with self._state.condition:
+ while self._state.code is None:
+ self._state.condition.wait()
+ return self._state.code
+
+ def details(self):
+ with self._state.condition:
+ while self._state.details is None:
+ self._state.condition.wait()
+ return _common.decode(self._state.details)
+
+ def _repr(self):
+ with self._state.condition:
+ if self._state.code is None:
+ return '<_Rendezvous object of in-flight RPC>'
+ else:
+ return '<_Rendezvous of RPC that terminated with ({}, {})>'.format(
+ self._state.code, _common.decode(self._state.details))
+
+ def __repr__(self):
+ return self._repr()
+
+ def __str__(self):
+ return self._repr()
+
+ def __del__(self):
+ with self._state.condition:
+ if self._state.code is None:
+ self._call.cancel()
+ self._state.cancelled = True
+ self._state.code = grpc.StatusCode.CANCELLED
+ self._state.condition.notify_all()
def _start_unary_request(request, timeout, request_serializer):
- deadline, deadline_timespec = _deadline(timeout)
- serialized_request = _common.serialize(request, request_serializer)
- if serialized_request is None:
- state = _RPCState(
- (), _EMPTY_METADATA, _EMPTY_METADATA, grpc.StatusCode.INTERNAL,
- 'Exception serializing request!')
- rendezvous = _Rendezvous(state, None, None, deadline)
- return deadline, deadline_timespec, None, rendezvous
- else:
- return deadline, deadline_timespec, serialized_request, None
+ deadline, deadline_timespec = _deadline(timeout)
+ serialized_request = _common.serialize(request, request_serializer)
+ if serialized_request is None:
+ state = _RPCState((), _EMPTY_METADATA, _EMPTY_METADATA,
+ grpc.StatusCode.INTERNAL,
+ 'Exception serializing request!')
+ rendezvous = _Rendezvous(state, None, None, deadline)
+ return deadline, deadline_timespec, None, rendezvous
+ else:
+ return deadline, deadline_timespec, serialized_request, None
def _end_unary_response_blocking(state, with_call, deadline):
- if state.code is grpc.StatusCode.OK:
- if with_call:
- rendezvous = _Rendezvous(state, None, None, deadline)
- return state.response, rendezvous
+ if state.code is grpc.StatusCode.OK:
+ if with_call:
+ rendezvous = _Rendezvous(state, None, None, deadline)
+ return state.response, rendezvous
+ else:
+ return state.response
else:
- return state.response
- else:
- raise _Rendezvous(state, None, None, deadline)
+ raise _Rendezvous(state, None, None, deadline)
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
- def __init__(
- self, channel, managed_call, method, request_serializer,
- response_deserializer):
- self._channel = channel
- self._managed_call = managed_call
- self._method = method
- self._request_serializer = request_serializer
- self._response_deserializer = response_deserializer
-
- def _prepare(self, request, timeout, metadata):
- deadline, deadline_timespec, serialized_request, rendezvous = (
- _start_unary_request(request, timeout, self._request_serializer))
- if serialized_request is None:
- return None, None, None, None, rendezvous
- else:
- state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
- operations = (
- cygrpc.operation_send_initial_metadata(
- _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
- cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS),
- cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
- cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
- cygrpc.operation_receive_message(_EMPTY_FLAGS),
- cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
- )
- return state, operations, deadline, deadline_timespec, None
-
- def _blocking(self, request, timeout, metadata, credentials):
- state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
- request, timeout, metadata)
- if rendezvous:
- raise rendezvous
- else:
- completion_queue = cygrpc.CompletionQueue()
- call = self._channel.create_call(
- None, 0, completion_queue, self._method, None, deadline_timespec)
- if credentials is not None:
- call.set_credentials(credentials._credentials)
- call_error = call.start_client_batch(cygrpc.Operations(operations), None)
- _check_call_error(call_error, metadata)
- _handle_event(completion_queue.poll(), state, self._response_deserializer)
- return state, deadline
-
- def __call__(self, request, timeout=None, metadata=None, credentials=None):
- state, deadline, = self._blocking(request, timeout, metadata, credentials)
- return _end_unary_response_blocking(state, False, deadline)
-
- def with_call(self, request, timeout=None, metadata=None, credentials=None):
- state, deadline, = self._blocking(request, timeout, metadata, credentials)
- return _end_unary_response_blocking(state, True, deadline)
-
- def future(self, request, timeout=None, metadata=None, credentials=None):
- state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
- request, timeout, metadata)
- if rendezvous:
- return rendezvous
- else:
- call, drive_call = self._managed_call(
- None, 0, self._method, None, deadline_timespec)
- if credentials is not None:
- call.set_credentials(credentials._credentials)
- event_handler = _event_handler(state, call, self._response_deserializer)
- with state.condition:
- call_error = call.start_client_batch(cygrpc.Operations(operations),
- event_handler)
- if call_error != cygrpc.CallError.ok:
- _call_error_set_RPCstate(state, call_error, metadata)
- return _Rendezvous(state, None, None, deadline)
- drive_call()
- return _Rendezvous(state, call, self._response_deserializer, deadline)
+ def __init__(self, channel, managed_call, method, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._managed_call = managed_call
+ self._method = method
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def _prepare(self, request, timeout, metadata):
+ deadline, deadline_timespec, serialized_request, rendezvous = (
+ _start_unary_request(request, timeout, self._request_serializer))
+ if serialized_request is None:
+ return None, None, None, None, rendezvous
+ else:
+ state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
+ cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS),
+ cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
+ cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
+ return state, operations, deadline, deadline_timespec, None
+
+ def _blocking(self, request, timeout, metadata, credentials):
+ state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
+ request, timeout, metadata)
+ if rendezvous:
+ raise rendezvous
+ else:
+ completion_queue = cygrpc.CompletionQueue()
+ call = self._channel.create_call(None, 0, completion_queue,
+ self._method, None,
+ deadline_timespec)
+ if credentials is not None:
+ call.set_credentials(credentials._credentials)
+ call_error = call.start_client_batch(
+ cygrpc.Operations(operations), None)
+ _check_call_error(call_error, metadata)
+ _handle_event(completion_queue.poll(), state,
+ self._response_deserializer)
+ return state, deadline
+
+ def __call__(self, request, timeout=None, metadata=None, credentials=None):
+ state, deadline, = self._blocking(request, timeout, metadata,
+ credentials)
+ return _end_unary_response_blocking(state, False, deadline)
+
+ def with_call(self, request, timeout=None, metadata=None, credentials=None):
+ state, deadline, = self._blocking(request, timeout, metadata,
+ credentials)
+ return _end_unary_response_blocking(state, True, deadline)
+
+ def future(self, request, timeout=None, metadata=None, credentials=None):
+ state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
+ request, timeout, metadata)
+ if rendezvous:
+ return rendezvous
+ else:
+ call, drive_call = self._managed_call(None, 0, self._method, None,
+ deadline_timespec)
+ if credentials is not None:
+ call.set_credentials(credentials._credentials)
+ event_handler = _event_handler(state, call,
+ self._response_deserializer)
+ with state.condition:
+ call_error = call.start_client_batch(
+ cygrpc.Operations(operations), event_handler)
+ if call_error != cygrpc.CallError.ok:
+ _call_error_set_RPCstate(state, call_error, metadata)
+ return _Rendezvous(state, None, None, deadline)
+ drive_call()
+ return _Rendezvous(state, call, self._response_deserializer,
+ deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
- def __init__(
- self, channel, managed_call, method, request_serializer,
- response_deserializer):
- self._channel = channel
- self._managed_call = managed_call
- self._method = method
- self._request_serializer = request_serializer
- self._response_deserializer = response_deserializer
-
- def __call__(self, request, timeout=None, metadata=None, credentials=None):
- deadline, deadline_timespec, serialized_request, rendezvous = (
- _start_unary_request(request, timeout, self._request_serializer))
- if serialized_request is None:
- raise rendezvous
- else:
- state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
- call, drive_call = self._managed_call(
- None, 0, self._method, None, deadline_timespec)
- if credentials is not None:
- call.set_credentials(credentials._credentials)
- event_handler = _event_handler(state, call, self._response_deserializer)
- with state.condition:
- call.start_client_batch(
- cygrpc.Operations(
- (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
- event_handler)
- operations = (
- cygrpc.operation_send_initial_metadata(
- _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
- cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS),
- cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
- cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
- )
- call_error = call.start_client_batch(cygrpc.Operations(operations),
- event_handler)
- if call_error != cygrpc.CallError.ok:
- _call_error_set_RPCstate(state, call_error, metadata)
- return _Rendezvous(state, None, None, deadline)
- drive_call()
- return _Rendezvous(state, call, self._response_deserializer, deadline)
+ def __init__(self, channel, managed_call, method, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._managed_call = managed_call
+ self._method = method
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def __call__(self, request, timeout=None, metadata=None, credentials=None):
+ deadline, deadline_timespec, serialized_request, rendezvous = (
+ _start_unary_request(request, timeout, self._request_serializer))
+ if serialized_request is None:
+ raise rendezvous
+ else:
+ state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
+ call, drive_call = self._managed_call(None, 0, self._method, None,
+ deadline_timespec)
+ if credentials is not None:
+ call.set_credentials(credentials._credentials)
+ event_handler = _event_handler(state, call,
+ self._response_deserializer)
+ with state.condition:
+ call.start_client_batch(
+ cygrpc.Operations((
+ cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
+ )), event_handler)
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
+ cygrpc.operation_send_message(serialized_request,
+ _EMPTY_FLAGS),
+ cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
+ call_error = call.start_client_batch(
+ cygrpc.Operations(operations), event_handler)
+ if call_error != cygrpc.CallError.ok:
+ _call_error_set_RPCstate(state, call_error, metadata)
+ return _Rendezvous(state, None, None, deadline)
+ drive_call()
+ return _Rendezvous(state, call, self._response_deserializer,
+ deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
- def __init__(
- self, channel, managed_call, method, request_serializer,
- response_deserializer):
- self._channel = channel
- self._managed_call = managed_call
- self._method = method
- self._request_serializer = request_serializer
- self._response_deserializer = response_deserializer
-
- def _blocking(self, request_iterator, timeout, metadata, credentials):
- deadline, deadline_timespec = _deadline(timeout)
- state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
- completion_queue = cygrpc.CompletionQueue()
- call = self._channel.create_call(
- None, 0, completion_queue, self._method, None, deadline_timespec)
- if credentials is not None:
- call.set_credentials(credentials._credentials)
- with state.condition:
- call.start_client_batch(
- cygrpc.Operations(
- (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
- None)
- operations = (
- cygrpc.operation_send_initial_metadata(
- _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
- cygrpc.operation_receive_message(_EMPTY_FLAGS),
- cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
- )
- call_error = call.start_client_batch(cygrpc.Operations(operations), None)
- _check_call_error(call_error, metadata)
- _consume_request_iterator(
- request_iterator, state, call, self._request_serializer)
- while True:
- event = completion_queue.poll()
- with state.condition:
- _handle_event(event, state, self._response_deserializer)
- state.condition.notify_all()
- if not state.due:
- break
- return state, deadline
-
- def __call__(
- self, request_iterator, timeout=None, metadata=None, credentials=None):
- state, deadline, = self._blocking(
- request_iterator, timeout, metadata, credentials)
- return _end_unary_response_blocking(state, False, deadline)
-
- def with_call(
- self, request_iterator, timeout=None, metadata=None, credentials=None):
- state, deadline, = self._blocking(
- request_iterator, timeout, metadata, credentials)
- return _end_unary_response_blocking(state, True, deadline)
-
- def future(
- self, request_iterator, timeout=None, metadata=None, credentials=None):
- deadline, deadline_timespec = _deadline(timeout)
- state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
- call, drive_call = self._managed_call(
- None, 0, self._method, None, deadline_timespec)
- if credentials is not None:
- call.set_credentials(credentials._credentials)
- event_handler = _event_handler(state, call, self._response_deserializer)
- with state.condition:
- call.start_client_batch(
- cygrpc.Operations(
- (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
- event_handler)
- operations = (
- cygrpc.operation_send_initial_metadata(
- _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
- cygrpc.operation_receive_message(_EMPTY_FLAGS),
- cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
- )
- call_error = call.start_client_batch(cygrpc.Operations(operations),
- event_handler)
- if call_error != cygrpc.CallError.ok:
- _call_error_set_RPCstate(state, call_error, metadata)
- return _Rendezvous(state, None, None, deadline)
- drive_call()
- _consume_request_iterator(
- request_iterator, state, call, self._request_serializer)
- return _Rendezvous(state, call, self._response_deserializer, deadline)
+ def __init__(self, channel, managed_call, method, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._managed_call = managed_call
+ self._method = method
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def _blocking(self, request_iterator, timeout, metadata, credentials):
+ deadline, deadline_timespec = _deadline(timeout)
+ state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
+ completion_queue = cygrpc.CompletionQueue()
+ call = self._channel.create_call(None, 0, completion_queue,
+ self._method, None, deadline_timespec)
+ if credentials is not None:
+ call.set_credentials(credentials._credentials)
+ with state.condition:
+ call.start_client_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
+ None)
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
+ call_error = call.start_client_batch(
+ cygrpc.Operations(operations), None)
+ _check_call_error(call_error, metadata)
+ _consume_request_iterator(request_iterator, state, call,
+ self._request_serializer)
+ while True:
+ event = completion_queue.poll()
+ with state.condition:
+ _handle_event(event, state, self._response_deserializer)
+ state.condition.notify_all()
+ if not state.due:
+ break
+ return state, deadline
+
+ def __call__(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None):
+ state, deadline, = self._blocking(request_iterator, timeout, metadata,
+ credentials)
+ return _end_unary_response_blocking(state, False, deadline)
+
+ def with_call(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None):
+ state, deadline, = self._blocking(request_iterator, timeout, metadata,
+ credentials)
+ return _end_unary_response_blocking(state, True, deadline)
+
+ def future(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None):
+ deadline, deadline_timespec = _deadline(timeout)
+ state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
+ call, drive_call = self._managed_call(None, 0, self._method, None,
+ deadline_timespec)
+ if credentials is not None:
+ call.set_credentials(credentials._credentials)
+ event_handler = _event_handler(state, call, self._response_deserializer)
+ with state.condition:
+ call.start_client_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
+ event_handler)
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
+ call_error = call.start_client_batch(
+ cygrpc.Operations(operations), event_handler)
+ if call_error != cygrpc.CallError.ok:
+ _call_error_set_RPCstate(state, call_error, metadata)
+ return _Rendezvous(state, None, None, deadline)
+ drive_call()
+ _consume_request_iterator(request_iterator, state, call,
+ self._request_serializer)
+ return _Rendezvous(state, call, self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
- def __init__(
- self, channel, managed_call, method, request_serializer,
- response_deserializer):
- self._channel = channel
- self._managed_call = managed_call
- self._method = method
- self._request_serializer = request_serializer
- self._response_deserializer = response_deserializer
-
- def __call__(
- self, request_iterator, timeout=None, metadata=None, credentials=None):
- deadline, deadline_timespec = _deadline(timeout)
- state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
- call, drive_call = self._managed_call(
- None, 0, self._method, None, deadline_timespec)
- if credentials is not None:
- call.set_credentials(credentials._credentials)
- event_handler = _event_handler(state, call, self._response_deserializer)
- with state.condition:
- call.start_client_batch(
- cygrpc.Operations(
- (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
- event_handler)
- operations = (
- cygrpc.operation_send_initial_metadata(
- _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
- cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
- )
- call_error = call.start_client_batch(cygrpc.Operations(operations),
- event_handler)
- if call_error != cygrpc.CallError.ok:
- _call_error_set_RPCstate(state, call_error, metadata)
- return _Rendezvous(state, None, None, deadline)
- drive_call()
- _consume_request_iterator(
- request_iterator, state, call, self._request_serializer)
- return _Rendezvous(state, call, self._response_deserializer, deadline)
+ def __init__(self, channel, managed_call, method, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._managed_call = managed_call
+ self._method = method
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def __call__(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None):
+ deadline, deadline_timespec = _deadline(timeout)
+ state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
+ call, drive_call = self._managed_call(None, 0, self._method, None,
+ deadline_timespec)
+ if credentials is not None:
+ call.set_credentials(credentials._credentials)
+ event_handler = _event_handler(state, call, self._response_deserializer)
+ with state.condition:
+ call.start_client_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
+ event_handler)
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
+ call_error = call.start_client_batch(
+ cygrpc.Operations(operations), event_handler)
+ if call_error != cygrpc.CallError.ok:
+ _call_error_set_RPCstate(state, call_error, metadata)
+ return _Rendezvous(state, None, None, deadline)
+ drive_call()
+ _consume_request_iterator(request_iterator, state, call,
+ self._request_serializer)
+ return _Rendezvous(state, call, self._response_deserializer, deadline)
class _ChannelCallState(object):
- def __init__(self, channel):
- self.lock = threading.Lock()
- self.channel = channel
- self.completion_queue = cygrpc.CompletionQueue()
- self.managed_calls = None
+ def __init__(self, channel):
+ self.lock = threading.Lock()
+ self.channel = channel
+ self.completion_queue = cygrpc.CompletionQueue()
+ self.managed_calls = None
def _run_channel_spin_thread(state):
- def channel_spin():
- while True:
- event = state.completion_queue.poll()
- completed_call = event.tag(event)
- if completed_call is not None:
- with state.lock:
- state.managed_calls.remove(completed_call)
- if not state.managed_calls:
- state.managed_calls = None
- return
- def stop_channel_spin(timeout):
- with state.lock:
- if state.managed_calls is not None:
- for call in state.managed_calls:
- call.cancel()
+ def channel_spin():
+ while True:
+ event = state.completion_queue.poll()
+ completed_call = event.tag(event)
+ if completed_call is not None:
+ with state.lock:
+ state.managed_calls.remove(completed_call)
+ if not state.managed_calls:
+ state.managed_calls = None
+ return
+
+ def stop_channel_spin(timeout):
+ with state.lock:
+ if state.managed_calls is not None:
+ for call in state.managed_calls:
+ call.cancel()
- channel_spin_thread = _common.CleanupThread(
- stop_channel_spin, target=channel_spin)
- channel_spin_thread.start()
+ channel_spin_thread = _common.CleanupThread(
+ stop_channel_spin, target=channel_spin)
+ channel_spin_thread.start()
def _channel_managed_call_management(state):
- def create(parent, flags, method, host, deadline):
- """Creates a managed cygrpc.Call and a function to call to drive it.
+
+ def create(parent, flags, method, host, deadline):
+ """Creates a managed cygrpc.Call and a function to call to drive it.
If operations are successfully added to the returned cygrpc.Call, the
returned function must be called. If operations are not successfully added
@@ -754,193 +771,213 @@ def _channel_managed_call_management(state):
A cygrpc.Call with which to conduct an RPC and a function to call if
operations are successfully started on the call.
"""
- call = state.channel.create_call(
- parent, flags, state.completion_queue, method, host, deadline)
-
- def drive():
- with state.lock:
- if state.managed_calls is None:
- state.managed_calls = set((call,))
- _run_channel_spin_thread(state)
- else:
- state.managed_calls.add(call)
+ call = state.channel.create_call(parent, flags, state.completion_queue,
+ method, host, deadline)
+
+ def drive():
+ with state.lock:
+ if state.managed_calls is None:
+ state.managed_calls = set((call,))
+ _run_channel_spin_thread(state)
+ else:
+ state.managed_calls.add(call)
+
+ return call, drive
- return call, drive
- return create
+ return create
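For orientation, the create/drive contract documented above is the one the multicallables earlier in this file follow: client batches are started first, and the drive function is invoked only once the core has accepted them. A minimal sketch of a caller honoring that contract, reusing the cygrpc operations and the _event_handler/_EMPTY_FLAGS helpers visible elsewhere in this diff (the wrapper name _start_on_managed_call is illustrative only):

def _start_on_managed_call(managed_call, method, deadline_timespec, state,
                           response_deserializer):
    # managed_call is the `create` function returned above: it yields the
    # cygrpc.Call plus the drive function that registers the call with the
    # channel-spin thread.
    call, drive_call = managed_call(None, 0, method, None, deadline_timespec)
    event_handler = _event_handler(state, call, response_deserializer)
    operations = (
        cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
        cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
    call_error = call.start_client_batch(
        cygrpc.Operations(operations), event_handler)
    if call_error != cygrpc.CallError.ok:
        # Operations were not accepted, so per the docstring drive_call must
        # not be invoked.
        return None
    drive_call()  # operations accepted: the call must now be driven
    return call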
class _ChannelConnectivityState(object):
- def __init__(self, channel):
- self.lock = threading.Lock()
- self.channel = channel
- self.polling = False
- self.connectivity = None
- self.try_to_connect = False
- self.callbacks_and_connectivities = []
- self.delivering = False
+ def __init__(self, channel):
+ self.lock = threading.Lock()
+ self.channel = channel
+ self.polling = False
+ self.connectivity = None
+ self.try_to_connect = False
+ self.callbacks_and_connectivities = []
+ self.delivering = False
def _deliveries(state):
- callbacks_needing_update = []
- for callback_and_connectivity in state.callbacks_and_connectivities:
- callback, callback_connectivity, = callback_and_connectivity
- if callback_connectivity is not state.connectivity:
- callbacks_needing_update.append(callback)
- callback_and_connectivity[1] = state.connectivity
- return callbacks_needing_update
+ callbacks_needing_update = []
+ for callback_and_connectivity in state.callbacks_and_connectivities:
+ callback, callback_connectivity, = callback_and_connectivity
+ if callback_connectivity is not state.connectivity:
+ callbacks_needing_update.append(callback)
+ callback_and_connectivity[1] = state.connectivity
+ return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
- connectivity = initial_connectivity
- callbacks = initial_callbacks
- while True:
- for callback in callbacks:
- callable_util.call_logging_exceptions(
- callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
- connectivity)
- with state.lock:
- callbacks = _deliveries(state)
- if callbacks:
- connectivity = state.connectivity
- else:
- state.delivering = False
- return
+ connectivity = initial_connectivity
+ callbacks = initial_callbacks
+ while True:
+ for callback in callbacks:
+ callable_util.call_logging_exceptions(
+ callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
+ connectivity)
+ with state.lock:
+ callbacks = _deliveries(state)
+ if callbacks:
+ connectivity = state.connectivity
+ else:
+ state.delivering = False
+ return
def _spawn_delivery(state, callbacks):
- delivering_thread = threading.Thread(
- target=_deliver, args=(state, state.connectivity, callbacks,))
- delivering_thread.start()
- state.delivering = True
+ delivering_thread = threading.Thread(
+ target=_deliver, args=(
+ state,
+ state.connectivity,
+ callbacks,))
+ delivering_thread.start()
+ state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
- try_to_connect = initial_try_to_connect
- connectivity = channel.check_connectivity_state(try_to_connect)
- with state.lock:
- state.connectivity = (
- _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
- connectivity])
- callbacks = tuple(
- callback for callback, unused_but_known_to_be_none_connectivity
- in state.callbacks_and_connectivities)
- for callback_and_connectivity in state.callbacks_and_connectivities:
- callback_and_connectivity[1] = state.connectivity
- if callbacks:
- _spawn_delivery(state, callbacks)
- completion_queue = cygrpc.CompletionQueue()
- while True:
- channel.watch_connectivity_state(
- connectivity, cygrpc.Timespec(time.time() + 0.2),
- completion_queue, None)
- event = completion_queue.poll()
+ try_to_connect = initial_try_to_connect
+ connectivity = channel.check_connectivity_state(try_to_connect)
with state.lock:
- if not state.callbacks_and_connectivities and not state.try_to_connect:
- state.polling = False
- state.connectivity = None
- break
- try_to_connect = state.try_to_connect
- state.try_to_connect = False
- if event.success or try_to_connect:
- connectivity = channel.check_connectivity_state(try_to_connect)
- with state.lock:
state.connectivity = (
_common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
connectivity])
- if not state.delivering:
- callbacks = _deliveries(state)
- if callbacks:
+ callbacks = tuple(callback
+ for callback, unused_but_known_to_be_none_connectivity
+ in state.callbacks_and_connectivities)
+ for callback_and_connectivity in state.callbacks_and_connectivities:
+ callback_and_connectivity[1] = state.connectivity
+ if callbacks:
_spawn_delivery(state, callbacks)
+ completion_queue = cygrpc.CompletionQueue()
+ while True:
+ channel.watch_connectivity_state(connectivity,
+ cygrpc.Timespec(time.time() + 0.2),
+ completion_queue, None)
+ event = completion_queue.poll()
+ with state.lock:
+ if not state.callbacks_and_connectivities and not state.try_to_connect:
+ state.polling = False
+ state.connectivity = None
+ break
+ try_to_connect = state.try_to_connect
+ state.try_to_connect = False
+ if event.success or try_to_connect:
+ connectivity = channel.check_connectivity_state(try_to_connect)
+ with state.lock:
+ state.connectivity = (
+ _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
+ connectivity])
+ if not state.delivering:
+ callbacks = _deliveries(state)
+ if callbacks:
+ _spawn_delivery(state, callbacks)
def _moot(state):
- with state.lock:
- del state.callbacks_and_connectivities[:]
+ with state.lock:
+ del state.callbacks_and_connectivities[:]
def _subscribe(state, callback, try_to_connect):
- with state.lock:
- if not state.callbacks_and_connectivities and not state.polling:
- def cancel_all_subscriptions(timeout):
- _moot(state)
- polling_thread = _common.CleanupThread(
- cancel_all_subscriptions, target=_poll_connectivity,
- args=(state, state.channel, bool(try_to_connect)))
- polling_thread.start()
- state.polling = True
- state.callbacks_and_connectivities.append([callback, None])
- elif not state.delivering and state.connectivity is not None:
- _spawn_delivery(state, (callback,))
- state.try_to_connect |= bool(try_to_connect)
- state.callbacks_and_connectivities.append(
- [callback, state.connectivity])
- else:
- state.try_to_connect |= bool(try_to_connect)
- state.callbacks_and_connectivities.append([callback, None])
+ with state.lock:
+ if not state.callbacks_and_connectivities and not state.polling:
+
+ def cancel_all_subscriptions(timeout):
+ _moot(state)
+
+ polling_thread = _common.CleanupThread(
+ cancel_all_subscriptions,
+ target=_poll_connectivity,
+ args=(state, state.channel, bool(try_to_connect)))
+ polling_thread.start()
+ state.polling = True
+ state.callbacks_and_connectivities.append([callback, None])
+ elif not state.delivering and state.connectivity is not None:
+ _spawn_delivery(state, (callback,))
+ state.try_to_connect |= bool(try_to_connect)
+ state.callbacks_and_connectivities.append(
+ [callback, state.connectivity])
+ else:
+ state.try_to_connect |= bool(try_to_connect)
+ state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
- with state.lock:
- for index, (subscribed_callback, unused_connectivity) in enumerate(
- state.callbacks_and_connectivities):
- if callback == subscribed_callback:
- state.callbacks_and_connectivities.pop(index)
- break
+ with state.lock:
+ for index, (subscribed_callback, unused_connectivity
+ ) in enumerate(state.callbacks_and_connectivities):
+ if callback == subscribed_callback:
+ state.callbacks_and_connectivities.pop(index)
+ break
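The polling and delivery helpers above back the subscribe/unsubscribe methods on Channel further down in this file; each registered callback is handed a grpc.ChannelConnectivity value whenever the polled state changes. A small usage sketch against the public API (the target address and timeout are placeholders):

import threading

import grpc

def wait_until_ready(channel, timeout=None):
    # Block until the channel reports READY, relying on the subscription
    # machinery implemented above; returns False if the timeout expires first.
    ready = threading.Event()

    def on_connectivity_change(connectivity):
        if connectivity is grpc.ChannelConnectivity.READY:
            ready.set()

    channel.subscribe(on_connectivity_change, try_to_connect=True)
    try:
        return ready.wait(timeout)
    finally:
        channel.unsubscribe(on_connectivity_change)

# channel = grpc.insecure_channel('localhost:50051')
# wait_until_ready(channel, timeout=5.0)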
def _options(options):
- return list(options) + [
- (cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT)]
+ return list(options) + [
+ (cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT)
+ ]
class Channel(grpc.Channel):
- """A cygrpc.Channel-backed implementation of grpc.Channel."""
+ """A cygrpc.Channel-backed implementation of grpc.Channel."""
- def __init__(self, target, options, credentials):
- """Constructor.
+ def __init__(self, target, options, credentials):
+ """Constructor.
Args:
target: The target to which to connect.
options: Configuration options for the channel.
credentials: A cygrpc.ChannelCredentials or None.
"""
- self._channel = cygrpc.Channel(
- _common.encode(target), _common.channel_args(_options(options)),
- credentials)
- self._call_state = _ChannelCallState(self._channel)
- self._connectivity_state = _ChannelConnectivityState(self._channel)
-
- def subscribe(self, callback, try_to_connect=None):
- _subscribe(self._connectivity_state, callback, try_to_connect)
-
- def unsubscribe(self, callback):
- _unsubscribe(self._connectivity_state, callback)
-
- def unary_unary(
- self, method, request_serializer=None, response_deserializer=None):
- return _UnaryUnaryMultiCallable(
- self._channel, _channel_managed_call_management(self._call_state),
- _common.encode(method), request_serializer, response_deserializer)
-
- def unary_stream(
- self, method, request_serializer=None, response_deserializer=None):
- return _UnaryStreamMultiCallable(
- self._channel, _channel_managed_call_management(self._call_state),
- _common.encode(method), request_serializer, response_deserializer)
-
- def stream_unary(
- self, method, request_serializer=None, response_deserializer=None):
- return _StreamUnaryMultiCallable(
- self._channel, _channel_managed_call_management(self._call_state),
- _common.encode(method), request_serializer, response_deserializer)
-
- def stream_stream(
- self, method, request_serializer=None, response_deserializer=None):
- return _StreamStreamMultiCallable(
- self._channel, _channel_managed_call_management(self._call_state),
- _common.encode(method), request_serializer, response_deserializer)
-
- def __del__(self):
- _moot(self._connectivity_state)
+ self._channel = cygrpc.Channel(
+ _common.encode(target),
+ _common.channel_args(_options(options)), credentials)
+ self._call_state = _ChannelCallState(self._channel)
+ self._connectivity_state = _ChannelConnectivityState(self._channel)
+
+ def subscribe(self, callback, try_to_connect=None):
+ _subscribe(self._connectivity_state, callback, try_to_connect)
+
+ def unsubscribe(self, callback):
+ _unsubscribe(self._connectivity_state, callback)
+
+ def unary_unary(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ return _UnaryUnaryMultiCallable(
+ self._channel,
+ _channel_managed_call_management(self._call_state),
+ _common.encode(method), request_serializer, response_deserializer)
+
+ def unary_stream(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ return _UnaryStreamMultiCallable(
+ self._channel,
+ _channel_managed_call_management(self._call_state),
+ _common.encode(method), request_serializer, response_deserializer)
+
+ def stream_unary(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ return _StreamUnaryMultiCallable(
+ self._channel,
+ _channel_managed_call_management(self._call_state),
+ _common.encode(method), request_serializer, response_deserializer)
+
+ def stream_stream(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ return _StreamStreamMultiCallable(
+ self._channel,
+ _channel_managed_call_management(self._call_state),
+ _common.encode(method), request_serializer, response_deserializer)
+
+ def __del__(self):
+ _moot(self._connectivity_state)
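This Channel implementation is what grpc.insecure_channel and grpc.secure_channel ultimately hand back to applications, with credentials=None in the insecure case. A rough sketch of the path from construction to a unary-unary invocation (the method path and serializers below are placeholders, not part of this change):

import grpc

channel = grpc.insecure_channel('localhost:50051')

# unary_unary returns the _UnaryUnaryMultiCallable defined earlier in this
# file, bound to the encoded method name; serializers default to identity.
say_hello = channel.unary_unary(
    '/helloworld.Greeter/SayHello',
    request_serializer=None,
    response_deserializer=None)

# response_bytes = say_hello(b'serialized request bytes', timeout=5.0)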
diff --git a/src/python/grpcio/grpc/_common.py b/src/python/grpcio/grpc/_common.py
index cc0984c8c6..7ef2571379 100644
--- a/src/python/grpcio/grpc/_common.py
+++ b/src/python/grpcio/grpc/_common.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Shared implementation."""
import logging
@@ -45,9 +44,8 @@ CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
cygrpc.ConnectivityState.connecting: grpc.ChannelConnectivity.CONNECTING,
cygrpc.ConnectivityState.ready: grpc.ChannelConnectivity.READY,
cygrpc.ConnectivityState.transient_failure:
- grpc.ChannelConnectivity.TRANSIENT_FAILURE,
- cygrpc.ConnectivityState.shutdown:
- grpc.ChannelConnectivity.SHUTDOWN,
+ grpc.ChannelConnectivity.TRANSIENT_FAILURE,
+ cygrpc.ConnectivityState.shutdown: grpc.ChannelConnectivity.SHUTDOWN,
}
CYGRPC_STATUS_CODE_TO_STATUS_CODE = {
@@ -77,83 +75,88 @@ STATUS_CODE_TO_CYGRPC_STATUS_CODE = {
def encode(s):
- if isinstance(s, bytes):
- return s
- else:
- return s.encode('ascii')
+ if isinstance(s, bytes):
+ return s
+ else:
+ return s.encode('ascii')
def decode(b):
- if isinstance(b, str):
- return b
- else:
- try:
- return b.decode('utf8')
- except UnicodeDecodeError:
- logging.exception('Invalid encoding on {}'.format(b))
- return b.decode('latin1')
+ if isinstance(b, str):
+ return b
+ else:
+ try:
+ return b.decode('utf8')
+ except UnicodeDecodeError:
+ logging.exception('Invalid encoding on {}'.format(b))
+ return b.decode('latin1')
def channel_args(options):
- channel_args = []
- for key, value in options:
- if isinstance(value, six.string_types):
- channel_args.append(cygrpc.ChannelArg(encode(key), encode(value)))
- else:
- channel_args.append(cygrpc.ChannelArg(encode(key), value))
- return cygrpc.ChannelArgs(channel_args)
+ channel_args = []
+ for key, value in options:
+ if isinstance(value, six.string_types):
+ channel_args.append(cygrpc.ChannelArg(encode(key), encode(value)))
+ else:
+ channel_args.append(cygrpc.ChannelArg(encode(key), value))
+ return cygrpc.ChannelArgs(channel_args)
def cygrpc_metadata(application_metadata):
- return _EMPTY_METADATA if application_metadata is None else cygrpc.Metadata(
- cygrpc.Metadatum(encode(key), encode(value))
- for key, value in application_metadata)
+ return _EMPTY_METADATA if application_metadata is None else cygrpc.Metadata(
+ cygrpc.Metadatum(encode(key), encode(value))
+ for key, value in application_metadata)
def application_metadata(cygrpc_metadata):
- if cygrpc_metadata is None:
- return ()
- else:
- return tuple(
- (decode(key), value if key[-4:] == b'-bin' else decode(value))
- for key, value in cygrpc_metadata)
+ if cygrpc_metadata is None:
+ return ()
+ else:
+ return tuple((decode(key), value
+ if key[-4:] == b'-bin' else decode(value))
+ for key, value in cygrpc_metadata)
def _transform(message, transformer, exception_message):
- if transformer is None:
- return message
- else:
- try:
- return transformer(message)
- except Exception: # pylint: disable=broad-except
- logging.exception(exception_message)
- return None
+ if transformer is None:
+ return message
+ else:
+ try:
+ return transformer(message)
+ except Exception: # pylint: disable=broad-except
+ logging.exception(exception_message)
+ return None
def serialize(message, serializer):
- return _transform(message, serializer, 'Exception serializing message!')
+ return _transform(message, serializer, 'Exception serializing message!')
def deserialize(serialized_message, deserializer):
- return _transform(serialized_message, deserializer,
- 'Exception deserializing message!')
+ return _transform(serialized_message, deserializer,
+ 'Exception deserializing message!')
def fully_qualified_method(group, method):
- return '/{}/{}'.format(group, method)
+ return '/{}/{}'.format(group, method)
class CleanupThread(threading.Thread):
- """A threading.Thread subclass supporting custom behavior on join().
+ """A threading.Thread subclass supporting custom behavior on join().
On Python Interpreter exit, Python will attempt to join outstanding threads
prior to garbage collection. We may need to do additional cleanup, and
we accomplish this by overriding the join() method.
"""
- def __init__(self, behavior, group=None, target=None, name=None,
- args=(), kwargs={}):
- """Constructor.
+ def __init__(self,
+ behavior,
+ group=None,
+ target=None,
+ name=None,
+ args=(),
+ kwargs={}):
+ """Constructor.
Args:
behavior (function): Function called on join() with a single
@@ -169,15 +172,15 @@ class CleanupThread(threading.Thread):
kwargs (dict[str,object]): A dictionary of keyword arguments to
pass to `target`.
"""
- super(CleanupThread, self).__init__(group=group, target=target,
- name=name, args=args, kwargs=kwargs)
- self._behavior = behavior
-
- def join(self, timeout=None):
- start_time = time.time()
- self._behavior(timeout)
- end_time = time.time()
- if timeout is not None:
- timeout -= end_time - start_time
- timeout = max(timeout, 0)
- super(CleanupThread, self).join(timeout)
+ super(CleanupThread, self).__init__(
+ group=group, target=target, name=name, args=args, kwargs=kwargs)
+ self._behavior = behavior
+
+ def join(self, timeout=None):
+ start_time = time.time()
+ self._behavior(timeout)
+ end_time = time.time()
+ if timeout is not None:
+ timeout -= end_time - start_time
+ timeout = max(timeout, 0)
+ super(CleanupThread, self).join(timeout)
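The join() override above runs the supplied cleanup behavior first and charges the time it took against the caller's timeout before delegating to threading.Thread.join(). A minimal sketch of the pattern, mirroring how the channel-spin and connectivity-polling threads use it (the worker and stop functions are placeholders):

import time

def _worker():
    # Stand-in for a loop such as channel_spin() or _poll_connectivity().
    time.sleep(0.1)

def _stop_worker(timeout):
    # Stand-in cleanup behavior; receives the caller's join() timeout.
    pass

thread = CleanupThread(_stop_worker, target=_worker)
thread.start()
# join() first invokes _stop_worker(2.0), then joins with whatever portion of
# the two seconds remains.
thread.join(timeout=2.0)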
diff --git a/src/python/grpcio/grpc/_credential_composition.py b/src/python/grpcio/grpc/_credential_composition.py
index 9cb5508e27..bdf017baa5 100644
--- a/src/python/grpcio/grpc/_credential_composition.py
+++ b/src/python/grpcio/grpc/_credential_composition.py
@@ -31,18 +31,18 @@ from grpc._cython import cygrpc
def _call(call_credentialses):
- call_credentials_iterator = iter(call_credentialses)
- composition = next(call_credentials_iterator)
- for additional_call_credentials in call_credentials_iterator:
- composition = cygrpc.call_credentials_composite(
- composition, additional_call_credentials)
- return composition
+ call_credentials_iterator = iter(call_credentialses)
+ composition = next(call_credentials_iterator)
+ for additional_call_credentials in call_credentials_iterator:
+ composition = cygrpc.call_credentials_composite(
+ composition, additional_call_credentials)
+ return composition
def call(call_credentialses):
- return _call(call_credentialses)
+ return _call(call_credentialses)
def channel(channel_credentials, call_credentialses):
- return cygrpc.channel_credentials_composite(
- channel_credentials, _call(call_credentialses))
+ return cygrpc.channel_credentials_composite(channel_credentials,
+ _call(call_credentialses))
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
index 73d1ff7b97..246e8399bc 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
@@ -60,20 +60,25 @@ cdef class Channel:
method, host, Timespec deadline not None):
if queue.is_shutting_down:
raise ValueError("queue must not be shutting down or shutdown")
- cdef char *method_c_string = method
- cdef char *host_c_string = NULL
+ cdef grpc_slice method_slice = _slice_from_bytes(method)
+ cdef grpc_slice host_slice
+ cdef grpc_slice *host_slice_ptr = NULL
if host is not None:
- host_c_string = host
+ host_slice = _slice_from_bytes(host)
+ host_slice_ptr = &host_slice
cdef Call operation_call = Call()
- operation_call.references = [self, method, host, queue]
+ operation_call.references = [self, queue]
cdef grpc_call *parent_call = NULL
if parent is not None:
parent_call = parent.c_call
with nogil:
operation_call.c_call = grpc_channel_create_call(
self.c_channel, parent_call, flags,
- queue.c_completion_queue, method_c_string, host_c_string,
+ queue.c_completion_queue, method_slice, host_slice_ptr,
deadline.c_time, NULL)
+ grpc_slice_unref(method_slice)
+ if host_slice_ptr:
+ grpc_slice_unref(host_slice)
return operation_call
def check_connectivity_state(self, bint try_to_connect):
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
index a258ba4063..d8df6c2ef4 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
@@ -51,6 +51,7 @@ cdef class CompletionQueue:
cdef CallDetails request_call_details = None
cdef Metadata request_metadata = None
cdef Operations batch_operations = None
+ cdef Operation batch_operation = None
if event.type == GRPC_QUEUE_TIMEOUT:
return Event(
event.type, False, None, None, None, None, False, None)
@@ -69,8 +70,15 @@ cdef class CompletionQueue:
user_tag = tag.user_tag
operation_call = tag.operation_call
request_call_details = tag.request_call_details
- request_metadata = tag.request_metadata
+ if tag.request_metadata is not None:
+ request_metadata = tag.request_metadata
+ request_metadata._claim_slice_ownership()
batch_operations = tag.batch_operations
+ if tag.batch_operations is not None:
+ for op in batch_operations.operations:
+ batch_operation = <Operation>op
+ if batch_operation._received_metadata is not None:
+ batch_operation._received_metadata._claim_slice_ownership()
if tag.is_new_request:
# Stuff in the tag not explicitly handled by us needs to live through
# the life of the call
@@ -91,7 +99,7 @@ cdef class CompletionQueue:
c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME)
if deadline is not None:
c_deadline = deadline.c_time
-
+
while True:
c_timeout = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), c_increment)
if gpr_time_cmp(c_timeout, c_deadline) > 0:
@@ -100,7 +108,7 @@ cdef class CompletionQueue:
self.c_completion_queue, c_timeout, NULL)
if event.type != GRPC_QUEUE_TIMEOUT or gpr_time_cmp(c_timeout, c_deadline) == 0:
break;
-
+
# Handle any signals
with gil:
cpython.PyErr_CheckSignals()
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
index ad766186bd..141580b82a 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
@@ -51,6 +51,13 @@ cdef extern from "grpc/byte_buffer_reader.h":
pass
+cdef extern from "grpc/impl/codegen/exec_ctx_fwd.h":
+
+ struct grpc_exec_ctx:
+ # We don't care about the internals
+ pass
+
+
cdef extern from "grpc/grpc.h":
ctypedef struct grpc_slice:
@@ -60,6 +67,7 @@ cdef extern from "grpc/grpc.h":
grpc_slice grpc_slice_ref(grpc_slice s) nogil
void grpc_slice_unref(grpc_slice s) nogil
+ grpc_slice grpc_empty_slice() nogil
grpc_slice grpc_slice_new(void *p, size_t len, void (*destroy)(void *)) nogil
grpc_slice grpc_slice_new_with_len(
void *p, size_t len, void (*destroy)(void *, size_t)) nogil
@@ -175,7 +183,7 @@ cdef extern from "grpc/grpc.h":
ctypedef struct grpc_arg_pointer_vtable:
void *(*copy)(void *)
- void (*destroy)(void *)
+ void (*destroy)(grpc_exec_ctx *, void *)
int (*cmp)(void *, void *)
ctypedef struct grpc_arg_value_pointer:
@@ -217,9 +225,8 @@ cdef extern from "grpc/grpc.h":
GRPC_CHANNEL_SHUTDOWN
ctypedef struct grpc_metadata:
- const char *key
- const char *value
- size_t value_length
+ grpc_slice key
+ grpc_slice value
# ignore the 'internal_data.obfuscated' fields.
ctypedef enum grpc_completion_type:
@@ -241,10 +248,8 @@ cdef extern from "grpc/grpc.h":
void grpc_metadata_array_destroy(grpc_metadata_array *array) nogil
ctypedef struct grpc_call_details:
- char *method
- size_t method_capacity
- char *host
- size_t host_capacity
+ grpc_slice method
+ grpc_slice host
gpr_timespec deadline
void grpc_call_details_init(grpc_call_details *details) nogil
@@ -268,13 +273,12 @@ cdef extern from "grpc/grpc.h":
size_t trailing_metadata_count
grpc_metadata *trailing_metadata
grpc_status_code status
- const char *status_details
+ grpc_slice *status_details
ctypedef struct grpc_op_data_recv_status_on_client:
grpc_metadata_array *trailing_metadata
grpc_status_code *status
- char **status_details
- size_t *status_details_capacity
+ grpc_slice *status_details
ctypedef struct grpc_op_data_recv_close_on_server:
int *cancelled
@@ -321,9 +325,9 @@ cdef extern from "grpc/grpc.h":
const grpc_channel_args *args,
void *reserved) nogil
grpc_call *grpc_channel_create_call(
- grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
- grpc_completion_queue *completion_queue, const char *method,
- const char *host, gpr_timespec deadline, void *reserved) nogil
+ grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
+ grpc_completion_queue *completion_queue, grpc_slice method,
+ const grpc_slice *host, gpr_timespec deadline, void *reserved) nogil
grpc_connectivity_state grpc_channel_check_connectivity_state(
grpc_channel *channel, int try_to_connect) nogil
void grpc_channel_watch_connectivity_state(
@@ -473,8 +477,7 @@ cdef extern from "grpc/compression.h":
grpc_compression_algorithm default_compression_algorithm
int grpc_compression_algorithm_parse(
- const char *name, size_t name_length,
- grpc_compression_algorithm *algorithm) nogil
+ grpc_slice value, grpc_compression_algorithm *algorithm) nogil
int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
char **name) nogil
grpc_compression_algorithm grpc_compression_algorithm_for_level(
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
index 00ec91b131..c4a17118c0 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
@@ -28,6 +28,11 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+cdef bytes _slice_bytes(grpc_slice slice)
+cdef grpc_slice _copy_slice(grpc_slice slice) nogil
+cdef grpc_slice _slice_from_bytes(bytes value) nogil
+
+
cdef class Timespec:
cdef gpr_timespec c_time
@@ -97,13 +102,13 @@ cdef class ChannelArgs:
cdef class Metadatum:
cdef grpc_metadata c_metadata
- cdef object _key, _value
+ cdef void _copy_metadatum(self, grpc_metadata *destination) nogil
cdef class Metadata:
cdef grpc_metadata_array c_metadata_array
- cdef object metadata
+ cdef void _claim_slice_ownership(self)
cdef class Operation:
@@ -112,8 +117,7 @@ cdef class Operation:
cdef ByteBuffer _received_message
cdef Metadata _received_metadata
cdef grpc_status_code _received_status_code
- cdef char *_received_status_details
- cdef size_t _received_status_details_capacity
+ cdef grpc_slice _status_details
cdef int _received_cancelled
cdef readonly bint is_valid
cdef object references
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
index cadfce6ee6..d052b3f8bc 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
@@ -29,6 +29,26 @@
from libc.stdint cimport intptr_t
+
+cdef bytes _slice_bytes(grpc_slice slice):
+ cdef void *start = grpc_slice_start_ptr(slice)
+ cdef size_t length = grpc_slice_length(slice)
+ return (<const char *>start)[:length]
+
+cdef grpc_slice _copy_slice(grpc_slice slice) nogil:
+ cdef void *start = grpc_slice_start_ptr(slice)
+ cdef size_t length = grpc_slice_length(slice)
+ return grpc_slice_from_copied_buffer(<const char *>start, length)
+
+cdef grpc_slice _slice_from_bytes(bytes value) nogil:
+ cdef const char *value_ptr
+ cdef size_t length
+ with gil:
+ value_ptr = <const char *>value
+ length = len(value)
+ return grpc_slice_from_copied_buffer(value_ptr, length)
+
+
class ConnectivityState:
idle = GRPC_CHANNEL_IDLE
connecting = GRPC_CHANNEL_CONNECTING
@@ -189,17 +209,11 @@ cdef class CallDetails:
@property
def method(self):
- if self.c_details.method != NULL:
- return <bytes>self.c_details.method
- else:
- return None
+ return _slice_bytes(self.c_details.method)
@property
def host(self):
- if self.c_details.host != NULL:
- return <bytes>self.c_details.host
- else:
- return None
+ return _slice_bytes(self.c_details.host)
@property
def deadline(self):
@@ -310,7 +324,7 @@ cdef void* copy_ptr(void* ptr):
return ptr
-cdef void destroy_ptr(void* ptr):
+cdef void destroy_ptr(grpc_exec_ctx* ctx, void* ptr):
pass
@@ -383,19 +397,20 @@ cdef class ChannelArgs:
cdef class Metadatum:
def __cinit__(self, bytes key, bytes value):
- self._key = key
- self._value = value
- self.c_metadata.key = self._key
- self.c_metadata.value = self._value
- self.c_metadata.value_length = len(self._value)
+ self.c_metadata.key = _slice_from_bytes(key)
+ self.c_metadata.value = _slice_from_bytes(value)
+
+ cdef void _copy_metadatum(self, grpc_metadata *destination) nogil:
+ destination[0].key = _copy_slice(self.c_metadata.key)
+ destination[0].value = _copy_slice(self.c_metadata.value)
@property
def key(self):
- return <bytes>self.c_metadata.key
+ return _slice_bytes(self.c_metadata.key)
@property
def value(self):
- return <bytes>self.c_metadata.value[:self.c_metadata.value_length]
+ return _slice_bytes(self.c_metadata.value)
def __len__(self):
return 2
@@ -411,6 +426,9 @@ cdef class Metadatum:
def __iter__(self):
return iter((self.key, self.value))
+ def __dealloc__(self):
+ grpc_slice_unref(self.c_metadata.key)
+ grpc_slice_unref(self.c_metadata.value)
cdef class _MetadataIterator:
@@ -435,51 +453,65 @@ cdef class _MetadataIterator:
cdef class Metadata:
- def __cinit__(self, metadata):
- grpc_init()
- self.metadata = list(metadata)
+ def __cinit__(self, metadata_iterable):
+ with nogil:
+ grpc_init()
+ grpc_metadata_array_init(&self.c_metadata_array)
+ metadata = list(metadata_iterable)
for metadatum in metadata:
if not isinstance(metadatum, Metadatum):
raise TypeError("expected list of Metadatum")
- with nogil:
- grpc_metadata_array_init(&self.c_metadata_array)
- self.c_metadata_array.count = len(self.metadata)
- self.c_metadata_array.capacity = len(self.metadata)
+ self.c_metadata_array.count = len(metadata)
+ self.c_metadata_array.capacity = len(metadata)
with nogil:
self.c_metadata_array.metadata = <grpc_metadata *>gpr_malloc(
self.c_metadata_array.count*sizeof(grpc_metadata)
)
for i in range(self.c_metadata_array.count):
- self.c_metadata_array.metadata[i] = (
- (<Metadatum>self.metadata[i]).c_metadata)
+ (<Metadatum>metadata[i])._copy_metadatum(&self.c_metadata_array.metadata[i])
def __dealloc__(self):
- # this frees the allocated memory for the grpc_metadata_array (although
- # it'd be nice if that were documented somewhere...)
- # TODO(atash): document this in the C core
- grpc_metadata_array_destroy(&self.c_metadata_array)
- grpc_shutdown()
+ with nogil:
+ # this frees the allocated memory for the grpc_metadata_array (although
+ # it'd be nice if that were documented somewhere...)
+ # TODO(atash): document this in the C core
+ grpc_metadata_array_destroy(&self.c_metadata_array)
+ grpc_shutdown()
def __len__(self):
return self.c_metadata_array.count
def __getitem__(self, size_t i):
- return Metadatum(
- key=<bytes>self.c_metadata_array.metadata[i].key,
- value=<bytes>self.c_metadata_array.metadata[i].value[
- :self.c_metadata_array.metadata[i].value_length])
+ if i >= self.c_metadata_array.count:
+ raise IndexError
+ key = _slice_bytes(self.c_metadata_array.metadata[i].key)
+ value = _slice_bytes(self.c_metadata_array.metadata[i].value)
+ return Metadatum(key=key, value=value)
def __iter__(self):
return _MetadataIterator(self)
+ cdef void _claim_slice_ownership(self):
+ cdef grpc_metadata_array new_c_metadata_array
+ grpc_metadata_array_init(&new_c_metadata_array)
+ new_c_metadata_array.metadata = <grpc_metadata *>gpr_malloc(
+ self.c_metadata_array.count*sizeof(grpc_metadata))
+ new_c_metadata_array.count = self.c_metadata_array.count
+ for i in range(self.c_metadata_array.count):
+ new_c_metadata_array.metadata[i].key = _copy_slice(
+ self.c_metadata_array.metadata[i].key)
+ new_c_metadata_array.metadata[i].value = _copy_slice(
+ self.c_metadata_array.metadata[i].value)
+ grpc_metadata_array_destroy(&self.c_metadata_array)
+ self.c_metadata_array = new_c_metadata_array
+
cdef class Operation:
def __cinit__(self):
grpc_init()
self.references = []
- self._received_status_details = NULL
- self._received_status_details_capacity = 0
+ self._status_details = grpc_empty_slice()
self.is_valid = False
@property
@@ -536,19 +568,13 @@ cdef class Operation:
def received_status_details(self):
if self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT:
raise TypeError("self must be an operation receiving status details")
- if self._received_status_details:
- return self._received_status_details
- else:
- return None
+ return _slice_bytes(self._status_details)
@property
def received_status_details_or_none(self):
if self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT:
return None
- if self._received_status_details:
- return self._received_status_details
- else:
- return None
+ return _slice_bytes(self._status_details)
@property
def received_cancelled(self):
@@ -564,11 +590,7 @@ cdef class Operation:
return False if self._received_cancelled == 0 else True
def __dealloc__(self):
- # We *almost* don't need to do anything; most of the objects are handled by
- # Python. The remaining one(s) are primitive fields filled in by GRPC core.
- # This means that we need to clean up after receive_status_on_client.
- if self.c_op.type == GRPC_OP_RECV_STATUS_ON_CLIENT:
- gpr_free(self._received_status_details)
+ grpc_slice_unref(self._status_details)
grpc_shutdown()
def operation_send_initial_metadata(Metadata metadata, int flags):
@@ -609,9 +631,10 @@ def operation_send_status_from_server(
op.c_op.data.send_status_from_server.trailing_metadata = (
metadata.c_metadata_array.metadata)
op.c_op.data.send_status_from_server.status = code
- op.c_op.data.send_status_from_server.status_details = details
+ grpc_slice_unref(op._status_details)
+ op._status_details = _slice_from_bytes(details)
+ op.c_op.data.send_status_from_server.status_details = &op._status_details
op.references.append(metadata)
- op.references.append(details)
op.is_valid = True
return op
@@ -647,9 +670,7 @@ def operation_receive_status_on_client(int flags):
op.c_op.data.receive_status_on_client.status = (
&op._received_status_code)
op.c_op.data.receive_status_on_client.status_details = (
- &op._received_status_details)
- op.c_op.data.receive_status_on_client.status_details_capacity = (
- &op._received_status_details_capacity)
+ &op._status_details)
op.is_valid = True
return op
diff --git a/src/python/grpcio/grpc/_plugin_wrapping.py b/src/python/grpcio/grpc/_plugin_wrapping.py
index 7cb5218c22..bb9a42f3ff 100644
--- a/src/python/grpcio/grpc/_plugin_wrapping.py
+++ b/src/python/grpcio/grpc/_plugin_wrapping.py
@@ -36,82 +36,82 @@ from grpc._cython import cygrpc
class AuthMetadataContext(
- collections.namedtuple(
- 'AuthMetadataContext', ('service_url', 'method_name',)),
- grpc.AuthMetadataContext):
- pass
+ collections.namedtuple('AuthMetadataContext', (
+ 'service_url',
+ 'method_name',)), grpc.AuthMetadataContext):
+ pass
class AuthMetadataPluginCallback(grpc.AuthMetadataContext):
- def __init__(self, callback):
- self._callback = callback
+ def __init__(self, callback):
+ self._callback = callback
- def __call__(self, metadata, error):
- self._callback(metadata, error)
+ def __call__(self, metadata, error):
+ self._callback(metadata, error)
class _WrappedCygrpcCallback(object):
- def __init__(self, cygrpc_callback):
- self.is_called = False
- self.error = None
- self.is_called_lock = threading.Lock()
- self.cygrpc_callback = cygrpc_callback
-
- def _invoke_failure(self, error):
- # TODO(atash) translate different Exception superclasses into different
- # status codes.
- self.cygrpc_callback(
- _common.EMPTY_METADATA, cygrpc.StatusCode.internal,
- _common.encode(str(error)))
-
- def _invoke_success(self, metadata):
- try:
- cygrpc_metadata = _common.cygrpc_metadata(metadata)
- except Exception as error:
- self._invoke_failure(error)
- return
- self.cygrpc_callback(cygrpc_metadata, cygrpc.StatusCode.ok, b'')
-
- def __call__(self, metadata, error):
- with self.is_called_lock:
- if self.is_called:
- raise RuntimeError('callback should only ever be invoked once')
- if self.error:
- self._invoke_failure(self.error)
- return
- self.is_called = True
- if error is None:
- self._invoke_success(metadata)
- else:
- self._invoke_failure(error)
-
- def notify_failure(self, error):
- with self.is_called_lock:
- if not self.is_called:
- self.error = error
+ def __init__(self, cygrpc_callback):
+ self.is_called = False
+ self.error = None
+ self.is_called_lock = threading.Lock()
+ self.cygrpc_callback = cygrpc_callback
+
+ def _invoke_failure(self, error):
+ # TODO(atash) translate different Exception superclasses into different
+ # status codes.
+ self.cygrpc_callback(_common.EMPTY_METADATA, cygrpc.StatusCode.internal,
+ _common.encode(str(error)))
+
+ def _invoke_success(self, metadata):
+ try:
+ cygrpc_metadata = _common.cygrpc_metadata(metadata)
+ except Exception as error:
+ self._invoke_failure(error)
+ return
+ self.cygrpc_callback(cygrpc_metadata, cygrpc.StatusCode.ok, b'')
+
+ def __call__(self, metadata, error):
+ with self.is_called_lock:
+ if self.is_called:
+ raise RuntimeError('callback should only ever be invoked once')
+ if self.error:
+ self._invoke_failure(self.error)
+ return
+ self.is_called = True
+ if error is None:
+ self._invoke_success(metadata)
+ else:
+ self._invoke_failure(error)
+
+ def notify_failure(self, error):
+ with self.is_called_lock:
+ if not self.is_called:
+ self.error = error
class _WrappedPlugin(object):
- def __init__(self, plugin):
- self.plugin = plugin
+ def __init__(self, plugin):
+ self.plugin = plugin
- def __call__(self, context, cygrpc_callback):
- wrapped_cygrpc_callback = _WrappedCygrpcCallback(cygrpc_callback)
- wrapped_context = AuthMetadataContext(
- _common.decode(context.service_url), _common.decode(context.method_name))
- try:
- self.plugin(
- wrapped_context, AuthMetadataPluginCallback(wrapped_cygrpc_callback))
- except Exception as error:
- wrapped_cygrpc_callback.notify_failure(error)
- raise
+ def __call__(self, context, cygrpc_callback):
+ wrapped_cygrpc_callback = _WrappedCygrpcCallback(cygrpc_callback)
+ wrapped_context = AuthMetadataContext(
+ _common.decode(context.service_url),
+ _common.decode(context.method_name))
+ try:
+ self.plugin(wrapped_context,
+ AuthMetadataPluginCallback(wrapped_cygrpc_callback))
+ except Exception as error:
+ wrapped_cygrpc_callback.notify_failure(error)
+ raise
def call_credentials_metadata_plugin(plugin, name):
- """
+ """
Args:
plugin: A callable accepting a grpc.AuthMetadataContext
object and a callback (itself accepting a list of metadata key/value
@@ -119,5 +119,6 @@ def call_credentials_metadata_plugin(plugin, name):
called, but need not be called in plugin's invocation.
plugin's invocation must be non-blocking.
"""
- return cygrpc.call_credentials_metadata_plugin(
- cygrpc.CredentialsMetadataPlugin(_WrappedPlugin(plugin), _common.encode(name)))
+ return cygrpc.call_credentials_metadata_plugin(
+ cygrpc.CredentialsMetadataPlugin(
+ _WrappedPlugin(plugin), _common.encode(name)))
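This wrapper is what grpc.metadata_call_credentials routes a user-supplied plugin through: the plugin is invoked with the AuthMetadataContext defined at the top of this file and a callback taking (metadata, error), and it must not block. A hedged sketch of a conforming plugin (the header name and token are placeholders):

import grpc

class _StaticTokenAuthPlugin(grpc.AuthMetadataPlugin):
    """Supplies a static bearer token for every call."""

    def __call__(self, context, callback):
        # context carries service_url and method_name; hand the metadata (or
        # an error) straight back without blocking.
        callback((('authorization', 'Bearer placeholder-token'),), None)

call_credentials = grpc.metadata_call_credentials(
    _StaticTokenAuthPlugin(), name='static_token_auth')
# channel_credentials = grpc.composite_channel_credentials(
#     grpc.ssl_channel_credentials(), call_credentials)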
diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py
index 5223712dfa..7b7b4d5dab 100644
--- a/src/python/grpcio/grpc/_server.py
+++ b/src/python/grpcio/grpc/_server.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Service-side implementation of gRPC Python."""
import collections
@@ -64,692 +63,717 @@ _UNEXPECTED_EXIT_SERVER_GRACE = 1.0
def _serialized_request(request_event):
- return request_event.batch_operations[0].received_message.bytes()
+ return request_event.batch_operations[0].received_message.bytes()
def _application_code(code):
- cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
- return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
+ cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
+ return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
def _completion_code(state):
- if state.code is None:
- return cygrpc.StatusCode.ok
- else:
- return _application_code(state.code)
+ if state.code is None:
+ return cygrpc.StatusCode.ok
+ else:
+ return _application_code(state.code)
def _abortion_code(state, code):
- if state.code is None:
- return code
- else:
- return _application_code(state.code)
+ if state.code is None:
+ return code
+ else:
+ return _application_code(state.code)
def _details(state):
- return b'' if state.details is None else state.details
+ return b'' if state.details is None else state.details
class _HandlerCallDetails(
- collections.namedtuple(
- '_HandlerCallDetails', ('method', 'invocation_metadata',)),
- grpc.HandlerCallDetails):
- pass
+ collections.namedtuple('_HandlerCallDetails', (
+ 'method',
+ 'invocation_metadata',)), grpc.HandlerCallDetails):
+ pass
class _RPCState(object):
- def __init__(self):
- self.condition = threading.Condition()
- self.due = set()
- self.request = None
- self.client = _OPEN
- self.initial_metadata_allowed = True
- self.disable_next_compression = False
- self.trailing_metadata = None
- self.code = None
- self.details = None
- self.statused = False
- self.rpc_errors = []
- self.callbacks = []
+ def __init__(self):
+ self.condition = threading.Condition()
+ self.due = set()
+ self.request = None
+ self.client = _OPEN
+ self.initial_metadata_allowed = True
+ self.disable_next_compression = False
+ self.trailing_metadata = None
+ self.code = None
+ self.details = None
+ self.statused = False
+ self.rpc_errors = []
+ self.callbacks = []
def _raise_rpc_error(state):
- rpc_error = grpc.RpcError()
- state.rpc_errors.append(rpc_error)
- raise rpc_error
+ rpc_error = grpc.RpcError()
+ state.rpc_errors.append(rpc_error)
+ raise rpc_error
def _possibly_finish_call(state, token):
- state.due.remove(token)
- if (state.client is _CANCELLED or state.statused) and not state.due:
- callbacks = state.callbacks
- state.callbacks = None
- return state, callbacks
- else:
- return None, ()
+ state.due.remove(token)
+ if (state.client is _CANCELLED or state.statused) and not state.due:
+ callbacks = state.callbacks
+ state.callbacks = None
+ return state, callbacks
+ else:
+ return None, ()
def _send_status_from_server(state, token):
- def send_status_from_server(unused_send_status_from_server_event):
- with state.condition:
- return _possibly_finish_call(state, token)
- return send_status_from_server
+
+ def send_status_from_server(unused_send_status_from_server_event):
+ with state.condition:
+ return _possibly_finish_call(state, token)
+
+ return send_status_from_server
def _abort(state, call, code, details):
- if state.client is not _CANCELLED:
- effective_code = _abortion_code(state, code)
- effective_details = details if state.details is None else state.details
- if state.initial_metadata_allowed:
- operations = (
- cygrpc.operation_send_initial_metadata(
- _EMPTY_METADATA, _EMPTY_FLAGS),
- cygrpc.operation_send_status_from_server(
- _common.cygrpc_metadata(state.trailing_metadata), effective_code,
- effective_details, _EMPTY_FLAGS),
- )
- token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
- else:
- operations = (
- cygrpc.operation_send_status_from_server(
- _common.cygrpc_metadata(state.trailing_metadata), effective_code,
- effective_details, _EMPTY_FLAGS),
- )
- token = _SEND_STATUS_FROM_SERVER_TOKEN
- call.start_server_batch(
- cygrpc.Operations(operations),
- _send_status_from_server(state, token))
- state.statused = True
- state.due.add(token)
+ if state.client is not _CANCELLED:
+ effective_code = _abortion_code(state, code)
+ effective_details = details if state.details is None else state.details
+ if state.initial_metadata_allowed:
+ operations = (
+ cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
+ _EMPTY_FLAGS),
+ cygrpc.operation_send_status_from_server(
+ _common.cygrpc_metadata(state.trailing_metadata),
+ effective_code, effective_details, _EMPTY_FLAGS),)
+ token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
+ else:
+ operations = (cygrpc.operation_send_status_from_server(
+ _common.cygrpc_metadata(state.trailing_metadata),
+ effective_code, effective_details, _EMPTY_FLAGS),)
+ token = _SEND_STATUS_FROM_SERVER_TOKEN
+ call.start_server_batch(
+ cygrpc.Operations(operations),
+ _send_status_from_server(state, token))
+ state.statused = True
+ state.due.add(token)
def _receive_close_on_server(state):
- def receive_close_on_server(receive_close_on_server_event):
- with state.condition:
- if receive_close_on_server_event.batch_operations[0].received_cancelled:
- state.client = _CANCELLED
- elif state.client is _OPEN:
- state.client = _CLOSED
- state.condition.notify_all()
- return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)
- return receive_close_on_server
+
+ def receive_close_on_server(receive_close_on_server_event):
+ with state.condition:
+ if receive_close_on_server_event.batch_operations[
+ 0].received_cancelled:
+ state.client = _CANCELLED
+ elif state.client is _OPEN:
+ state.client = _CLOSED
+ state.condition.notify_all()
+ return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)
+
+ return receive_close_on_server
def _receive_message(state, call, request_deserializer):
- def receive_message(receive_message_event):
- serialized_request = _serialized_request(receive_message_event)
- if serialized_request is None:
- with state.condition:
- if state.client is _OPEN:
- state.client = _CLOSED
- state.condition.notify_all()
- return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
- else:
- request = _common.deserialize(serialized_request, request_deserializer)
- with state.condition:
- if request is None:
- _abort(
- state, call, cygrpc.StatusCode.internal,
- b'Exception deserializing request!')
+
+ def receive_message(receive_message_event):
+ serialized_request = _serialized_request(receive_message_event)
+ if serialized_request is None:
+ with state.condition:
+ if state.client is _OPEN:
+ state.client = _CLOSED
+ state.condition.notify_all()
+ return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
else:
- state.request = request
- state.condition.notify_all()
- return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
- return receive_message
+ request = _common.deserialize(serialized_request,
+ request_deserializer)
+ with state.condition:
+ if request is None:
+ _abort(state, call, cygrpc.StatusCode.internal,
+ b'Exception deserializing request!')
+ else:
+ state.request = request
+ state.condition.notify_all()
+ return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
+
+ return receive_message
def _send_initial_metadata(state):
- def send_initial_metadata(unused_send_initial_metadata_event):
- with state.condition:
- return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)
- return send_initial_metadata
+
+ def send_initial_metadata(unused_send_initial_metadata_event):
+ with state.condition:
+ return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)
+
+ return send_initial_metadata
def _send_message(state, token):
- def send_message(unused_send_message_event):
- with state.condition:
- state.condition.notify_all()
- return _possibly_finish_call(state, token)
- return send_message
+
+ def send_message(unused_send_message_event):
+ with state.condition:
+ state.condition.notify_all()
+ return _possibly_finish_call(state, token)
+
+ return send_message
class _Context(grpc.ServicerContext):
- def __init__(self, rpc_event, state, request_deserializer):
- self._rpc_event = rpc_event
- self._state = state
- self._request_deserializer = request_deserializer
+ def __init__(self, rpc_event, state, request_deserializer):
+ self._rpc_event = rpc_event
+ self._state = state
+ self._request_deserializer = request_deserializer
- def is_active(self):
- with self._state.condition:
- return self._state.client is not _CANCELLED and not self._state.statused
+ def is_active(self):
+ with self._state.condition:
+ return self._state.client is not _CANCELLED and not self._state.statused
- def time_remaining(self):
- return max(self._rpc_event.request_call_details.deadline - time.time(), 0)
+ def time_remaining(self):
+ return max(self._rpc_event.request_call_details.deadline - time.time(),
+ 0)
- def cancel(self):
- self._rpc_event.operation_call.cancel()
+ def cancel(self):
+ self._rpc_event.operation_call.cancel()
- def add_callback(self, callback):
- with self._state.condition:
- if self._state.callbacks is None:
- return False
- else:
- self._state.callbacks.append(callback)
- return True
+ def add_callback(self, callback):
+ with self._state.condition:
+ if self._state.callbacks is None:
+ return False
+ else:
+ self._state.callbacks.append(callback)
+ return True
- def disable_next_message_compression(self):
- with self._state.condition:
- self._state.disable_next_compression = True
-
- def invocation_metadata(self):
- return _common.application_metadata(self._rpc_event.request_metadata)
-
- def peer(self):
- return _common.decode(self._rpc_event.operation_call.peer())
-
- def send_initial_metadata(self, initial_metadata):
- with self._state.condition:
- if self._state.client is _CANCELLED:
- _raise_rpc_error(self._state)
- else:
- if self._state.initial_metadata_allowed:
- operation = cygrpc.operation_send_initial_metadata(
- _common.cygrpc_metadata(initial_metadata), _EMPTY_FLAGS)
- self._rpc_event.operation_call.start_server_batch(
- cygrpc.Operations((operation,)),
- _send_initial_metadata(self._state))
- self._state.initial_metadata_allowed = False
- self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
- else:
- raise ValueError('Initial metadata no longer allowed!')
+ def disable_next_message_compression(self):
+ with self._state.condition:
+ self._state.disable_next_compression = True
- def set_trailing_metadata(self, trailing_metadata):
- with self._state.condition:
- self._state.trailing_metadata = _common.cygrpc_metadata(
- trailing_metadata)
+ def invocation_metadata(self):
+ return _common.application_metadata(self._rpc_event.request_metadata)
- def set_code(self, code):
- with self._state.condition:
- self._state.code = code
+ def peer(self):
+ return _common.decode(self._rpc_event.operation_call.peer())
- def set_details(self, details):
- with self._state.condition:
- self._state.details = _common.encode(details)
+ def send_initial_metadata(self, initial_metadata):
+ with self._state.condition:
+ if self._state.client is _CANCELLED:
+ _raise_rpc_error(self._state)
+ else:
+ if self._state.initial_metadata_allowed:
+ operation = cygrpc.operation_send_initial_metadata(
+ _common.cygrpc_metadata(initial_metadata), _EMPTY_FLAGS)
+ self._rpc_event.operation_call.start_server_batch(
+ cygrpc.Operations((operation,)),
+ _send_initial_metadata(self._state))
+ self._state.initial_metadata_allowed = False
+ self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
+ else:
+ raise ValueError('Initial metadata no longer allowed!')
+
+ def set_trailing_metadata(self, trailing_metadata):
+ with self._state.condition:
+ self._state.trailing_metadata = _common.cygrpc_metadata(
+ trailing_metadata)
+
+ def set_code(self, code):
+ with self._state.condition:
+ self._state.code = code
+
+ def set_details(self, details):
+ with self._state.condition:
+ self._state.details = _common.encode(details)
class _RequestIterator(object):
- def __init__(self, state, call, request_deserializer):
- self._state = state
- self._call = call
- self._request_deserializer = request_deserializer
+ def __init__(self, state, call, request_deserializer):
+ self._state = state
+ self._call = call
+ self._request_deserializer = request_deserializer
- def _raise_or_start_receive_message(self):
- if self._state.client is _CANCELLED:
- _raise_rpc_error(self._state)
- elif self._state.client is _CLOSED or self._state.statused:
- raise StopIteration()
- else:
- self._call.start_server_batch(
- cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
- _receive_message(self._state, self._call, self._request_deserializer))
- self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
-
- def _look_for_request(self):
- if self._state.client is _CANCELLED:
- _raise_rpc_error(self._state)
- elif (self._state.request is None and
- _RECEIVE_MESSAGE_TOKEN not in self._state.due):
- raise StopIteration()
- else:
- request = self._state.request
- self._state.request = None
- return request
+ def _raise_or_start_receive_message(self):
+ if self._state.client is _CANCELLED:
+ _raise_rpc_error(self._state)
+ elif self._state.client is _CLOSED or self._state.statused:
+ raise StopIteration()
+ else:
+ self._call.start_server_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
+ _receive_message(self._state, self._call,
+ self._request_deserializer))
+ self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
+
+ def _look_for_request(self):
+ if self._state.client is _CANCELLED:
+ _raise_rpc_error(self._state)
+ elif (self._state.request is None and
+ _RECEIVE_MESSAGE_TOKEN not in self._state.due):
+ raise StopIteration()
+ else:
+ request = self._state.request
+ self._state.request = None
+ return request
- def _next(self):
- with self._state.condition:
- self._raise_or_start_receive_message()
- while True:
- self._state.condition.wait()
- request = self._look_for_request()
- if request is not None:
- return request
+ def _next(self):
+ with self._state.condition:
+ self._raise_or_start_receive_message()
+ while True:
+ self._state.condition.wait()
+ request = self._look_for_request()
+ if request is not None:
+ return request
- def __iter__(self):
- return self
+ def __iter__(self):
+ return self
- def __next__(self):
- return self._next()
+ def __next__(self):
+ return self._next()
- def next(self):
- return self._next()
+ def next(self):
+ return self._next()
def _unary_request(rpc_event, state, request_deserializer):
- def unary_request():
- with state.condition:
- if state.client is _CANCELLED or state.statused:
- return None
- else:
- start_server_batch_result = rpc_event.operation_call.start_server_batch(
- cygrpc.Operations(
- (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
- _receive_message(
- state, rpc_event.operation_call, request_deserializer))
- state.due.add(_RECEIVE_MESSAGE_TOKEN)
- while True:
- state.condition.wait()
- if state.request is None:
- if state.client is _CLOSED:
- details = '"{}" requires exactly one request message.'.format(
- rpc_event.request_call_details.method)
- _abort(
- state, rpc_event.operation_call,
- cygrpc.StatusCode.unimplemented, _common.encode(details))
- return None
- elif state.client is _CANCELLED:
- return None
- else:
- request = state.request
- state.request = None
- return request
- return unary_request
+
+ def unary_request():
+ with state.condition:
+ if state.client is _CANCELLED or state.statused:
+ return None
+ else:
+ start_server_batch_result = rpc_event.operation_call.start_server_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
+ _receive_message(state, rpc_event.operation_call,
+ request_deserializer))
+ state.due.add(_RECEIVE_MESSAGE_TOKEN)
+ while True:
+ state.condition.wait()
+ if state.request is None:
+ if state.client is _CLOSED:
+ details = '"{}" requires exactly one request message.'.format(
+ rpc_event.request_call_details.method)
+ _abort(state, rpc_event.operation_call,
+ cygrpc.StatusCode.unimplemented,
+ _common.encode(details))
+ return None
+ elif state.client is _CANCELLED:
+ return None
+ else:
+ request = state.request
+ state.request = None
+ return request
+
+ return unary_request
def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
- context = _Context(rpc_event, state, request_deserializer)
- try:
- return behavior(argument, context), True
- except Exception as e: # pylint: disable=broad-except
- with state.condition:
- if e not in state.rpc_errors:
- details = 'Exception calling application: {}'.format(e)
- logging.exception(details)
- _abort(state, rpc_event.operation_call,
- cygrpc.StatusCode.unknown, _common.encode(details))
- return None, False
+ context = _Context(rpc_event, state, request_deserializer)
+ try:
+ return behavior(argument, context), True
+ except Exception as e: # pylint: disable=broad-except
+ with state.condition:
+ if e not in state.rpc_errors:
+ details = 'Exception calling application: {}'.format(e)
+ logging.exception(details)
+ _abort(state, rpc_event.operation_call,
+ cygrpc.StatusCode.unknown, _common.encode(details))
+ return None, False
def _take_response_from_response_iterator(rpc_event, state, response_iterator):
- try:
- return next(response_iterator), True
- except StopIteration:
- return None, True
- except Exception as e: # pylint: disable=broad-except
- with state.condition:
- if e not in state.rpc_errors:
- details = 'Exception iterating responses: {}'.format(e)
- logging.exception(details)
- _abort(state, rpc_event.operation_call,
- cygrpc.StatusCode.unknown, _common.encode(details))
- return None, False
+ try:
+ return next(response_iterator), True
+ except StopIteration:
+ return None, True
+ except Exception as e: # pylint: disable=broad-except
+ with state.condition:
+ if e not in state.rpc_errors:
+ details = 'Exception iterating responses: {}'.format(e)
+ logging.exception(details)
+ _abort(state, rpc_event.operation_call,
+ cygrpc.StatusCode.unknown, _common.encode(details))
+ return None, False
def _serialize_response(rpc_event, state, response, response_serializer):
- serialized_response = _common.serialize(response, response_serializer)
- if serialized_response is None:
- with state.condition:
- _abort(
- state, rpc_event.operation_call, cygrpc.StatusCode.internal,
- b'Failed to serialize response!')
- return None
- else:
- return serialized_response
+ serialized_response = _common.serialize(response, response_serializer)
+ if serialized_response is None:
+ with state.condition:
+ _abort(state, rpc_event.operation_call, cygrpc.StatusCode.internal,
+ b'Failed to serialize response!')
+ return None
+ else:
+ return serialized_response
def _send_response(rpc_event, state, serialized_response):
- with state.condition:
- if state.client is _CANCELLED or state.statused:
- return False
- else:
- if state.initial_metadata_allowed:
- operations = (
- cygrpc.operation_send_initial_metadata(
- _EMPTY_METADATA, _EMPTY_FLAGS),
- cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS),
- )
- state.initial_metadata_allowed = False
- token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
- else:
- operations = (
- cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS),
- )
- token = _SEND_MESSAGE_TOKEN
- rpc_event.operation_call.start_server_batch(
- cygrpc.Operations(operations), _send_message(state, token))
- state.due.add(token)
- while True:
- state.condition.wait()
- if token not in state.due:
- return state.client is not _CANCELLED and not state.statused
+ with state.condition:
+ if state.client is _CANCELLED or state.statused:
+ return False
+ else:
+ if state.initial_metadata_allowed:
+ operations = (
+ cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
+ _EMPTY_FLAGS),
+ cygrpc.operation_send_message(serialized_response,
+ _EMPTY_FLAGS),)
+ state.initial_metadata_allowed = False
+ token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
+ else:
+ operations = (cygrpc.operation_send_message(serialized_response,
+ _EMPTY_FLAGS),)
+ token = _SEND_MESSAGE_TOKEN
+ rpc_event.operation_call.start_server_batch(
+ cygrpc.Operations(operations), _send_message(state, token))
+ state.due.add(token)
+ while True:
+ state.condition.wait()
+ if token not in state.due:
+ return state.client is not _CANCELLED and not state.statused
def _status(rpc_event, state, serialized_response):
- with state.condition:
- if state.client is not _CANCELLED:
- trailing_metadata = _common.cygrpc_metadata(state.trailing_metadata)
- code = _completion_code(state)
- details = _details(state)
- operations = [
- cygrpc.operation_send_status_from_server(
- trailing_metadata, code, details, _EMPTY_FLAGS),
- ]
- if state.initial_metadata_allowed:
- operations.append(
- cygrpc.operation_send_initial_metadata(
- _EMPTY_METADATA, _EMPTY_FLAGS))
- if serialized_response is not None:
- operations.append(cygrpc.operation_send_message(
- serialized_response, _EMPTY_FLAGS))
- rpc_event.operation_call.start_server_batch(
- cygrpc.Operations(operations),
- _send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
- state.statused = True
- state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
-
-
-def _unary_response_in_pool(
- rpc_event, state, behavior, argument_thunk, request_deserializer,
- response_serializer):
- argument = argument_thunk()
- if argument is not None:
- response, proceed = _call_behavior(
- rpc_event, state, behavior, argument, request_deserializer)
- if proceed:
- serialized_response = _serialize_response(
- rpc_event, state, response, response_serializer)
- if serialized_response is not None:
- _status(rpc_event, state, serialized_response)
-
-
-def _stream_response_in_pool(
- rpc_event, state, behavior, argument_thunk, request_deserializer,
- response_serializer):
- argument = argument_thunk()
- if argument is not None:
- response_iterator, proceed = _call_behavior(
- rpc_event, state, behavior, argument, request_deserializer)
- if proceed:
- while True:
- response, proceed = _take_response_from_response_iterator(
- rpc_event, state, response_iterator)
+ with state.condition:
+ if state.client is not _CANCELLED:
+ trailing_metadata = _common.cygrpc_metadata(state.trailing_metadata)
+ code = _completion_code(state)
+ details = _details(state)
+ operations = [
+ cygrpc.operation_send_status_from_server(
+ trailing_metadata, code, details, _EMPTY_FLAGS),
+ ]
+ if state.initial_metadata_allowed:
+ operations.append(
+ cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
+ _EMPTY_FLAGS))
+ if serialized_response is not None:
+ operations.append(
+ cygrpc.operation_send_message(serialized_response,
+ _EMPTY_FLAGS))
+ rpc_event.operation_call.start_server_batch(
+ cygrpc.Operations(operations),
+ _send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
+ state.statused = True
+ state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
+
+
+def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk,
+ request_deserializer, response_serializer):
+ argument = argument_thunk()
+ if argument is not None:
+ response, proceed = _call_behavior(rpc_event, state, behavior, argument,
+ request_deserializer)
if proceed:
- if response is None:
- _status(rpc_event, state, None)
- break
- else:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
- proceed = _send_response(rpc_event, state, serialized_response)
- if not proceed:
- break
- else:
- break
- else:
- break
+ _status(rpc_event, state, serialized_response)
+
+
+def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk,
+ request_deserializer, response_serializer):
+ argument = argument_thunk()
+ if argument is not None:
+ response_iterator, proceed = _call_behavior(
+ rpc_event, state, behavior, argument, request_deserializer)
+ if proceed:
+ while True:
+ response, proceed = _take_response_from_response_iterator(
+ rpc_event, state, response_iterator)
+ if proceed:
+ if response is None:
+ _status(rpc_event, state, None)
+ break
+ else:
+ serialized_response = _serialize_response(
+ rpc_event, state, response, response_serializer)
+ if serialized_response is not None:
+ proceed = _send_response(rpc_event, state,
+ serialized_response)
+ if not proceed:
+ break
+ else:
+ break
+ else:
+ break
def _handle_unary_unary(rpc_event, state, method_handler, thread_pool):
- unary_request = _unary_request(
- rpc_event, state, method_handler.request_deserializer)
- thread_pool.submit(
- _unary_response_in_pool, rpc_event, state, method_handler.unary_unary,
- unary_request, method_handler.request_deserializer,
- method_handler.response_serializer)
+ unary_request = _unary_request(rpc_event, state,
+ method_handler.request_deserializer)
+ thread_pool.submit(_unary_response_in_pool, rpc_event, state,
+ method_handler.unary_unary, unary_request,
+ method_handler.request_deserializer,
+ method_handler.response_serializer)
def _handle_unary_stream(rpc_event, state, method_handler, thread_pool):
- unary_request = _unary_request(
- rpc_event, state, method_handler.request_deserializer)
- thread_pool.submit(
- _stream_response_in_pool, rpc_event, state, method_handler.unary_stream,
- unary_request, method_handler.request_deserializer,
- method_handler.response_serializer)
+ unary_request = _unary_request(rpc_event, state,
+ method_handler.request_deserializer)
+ thread_pool.submit(_stream_response_in_pool, rpc_event, state,
+ method_handler.unary_stream, unary_request,
+ method_handler.request_deserializer,
+ method_handler.response_serializer)
def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
- request_iterator = _RequestIterator(
- state, rpc_event.operation_call, method_handler.request_deserializer)
- thread_pool.submit(
- _unary_response_in_pool, rpc_event, state, method_handler.stream_unary,
- lambda: request_iterator, method_handler.request_deserializer,
- method_handler.response_serializer)
+ request_iterator = _RequestIterator(state, rpc_event.operation_call,
+ method_handler.request_deserializer)
+ thread_pool.submit(_unary_response_in_pool, rpc_event, state,
+ method_handler.stream_unary, lambda: request_iterator,
+ method_handler.request_deserializer,
+ method_handler.response_serializer)
def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
- request_iterator = _RequestIterator(
- state, rpc_event.operation_call, method_handler.request_deserializer)
- thread_pool.submit(
- _stream_response_in_pool, rpc_event, state, method_handler.stream_stream,
- lambda: request_iterator, method_handler.request_deserializer,
- method_handler.response_serializer)
+ request_iterator = _RequestIterator(state, rpc_event.operation_call,
+ method_handler.request_deserializer)
+ thread_pool.submit(_stream_response_in_pool, rpc_event, state,
+ method_handler.stream_stream, lambda: request_iterator,
+ method_handler.request_deserializer,
+ method_handler.response_serializer)
def _find_method_handler(rpc_event, generic_handlers):
- for generic_handler in generic_handlers:
- method_handler = generic_handler.service(
- _HandlerCallDetails(
- _common.decode(rpc_event.request_call_details.method),
- rpc_event.request_metadata))
- if method_handler is not None:
- return method_handler
- else:
- return None
+ for generic_handler in generic_handlers:
+ method_handler = generic_handler.service(
+ _HandlerCallDetails(
+ _common.decode(rpc_event.request_call_details.method),
+ rpc_event.request_metadata))
+ if method_handler is not None:
+ return method_handler
+ else:
+ return None
def _handle_unrecognized_method(rpc_event):
- operations = (
- cygrpc.operation_send_initial_metadata(_EMPTY_METADATA, _EMPTY_FLAGS),
- cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
- cygrpc.operation_send_status_from_server(
- _EMPTY_METADATA, cygrpc.StatusCode.unimplemented,
- b'Method not found!', _EMPTY_FLAGS),
- )
- rpc_state = _RPCState()
- rpc_event.operation_call.start_server_batch(
- operations, lambda ignored_event: (rpc_state, (),))
- return rpc_state
+ operations = (
+ cygrpc.operation_send_initial_metadata(_EMPTY_METADATA, _EMPTY_FLAGS),
+ cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
+ cygrpc.operation_send_status_from_server(
+ _EMPTY_METADATA, cygrpc.StatusCode.unimplemented,
+ b'Method not found!', _EMPTY_FLAGS),)
+ rpc_state = _RPCState()
+ rpc_event.operation_call.start_server_batch(operations,
+ lambda ignored_event: (
+ rpc_state,
+ (),))
+ return rpc_state
def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
- state = _RPCState()
- with state.condition:
- rpc_event.operation_call.start_server_batch(
- cygrpc.Operations(
- (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
- _receive_close_on_server(state))
- state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
- if method_handler.request_streaming:
- if method_handler.response_streaming:
- _handle_stream_stream(rpc_event, state, method_handler, thread_pool)
- else:
- _handle_stream_unary(rpc_event, state, method_handler, thread_pool)
- else:
- if method_handler.response_streaming:
- _handle_unary_stream(rpc_event, state, method_handler, thread_pool)
- else:
- _handle_unary_unary(rpc_event, state, method_handler, thread_pool)
- return state
+ state = _RPCState()
+ with state.condition:
+ rpc_event.operation_call.start_server_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
+ _receive_close_on_server(state))
+ state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
+ if method_handler.request_streaming:
+ if method_handler.response_streaming:
+ _handle_stream_stream(rpc_event, state, method_handler,
+ thread_pool)
+ else:
+ _handle_stream_unary(rpc_event, state, method_handler,
+ thread_pool)
+ else:
+ if method_handler.response_streaming:
+ _handle_unary_stream(rpc_event, state, method_handler,
+ thread_pool)
+ else:
+ _handle_unary_unary(rpc_event, state, method_handler,
+ thread_pool)
+ return state
def _handle_call(rpc_event, generic_handlers, thread_pool):
- if rpc_event.request_call_details.method is not None:
- method_handler = _find_method_handler(rpc_event, generic_handlers)
- if method_handler is None:
- return _handle_unrecognized_method(rpc_event)
+ if not rpc_event.success:
+ return None
+ if rpc_event.request_call_details.method is not None:
+ method_handler = _find_method_handler(rpc_event, generic_handlers)
+ if method_handler is None:
+ return _handle_unrecognized_method(rpc_event)
+ else:
+ return _handle_with_method_handler(rpc_event, method_handler,
+ thread_pool)
else:
- return _handle_with_method_handler(rpc_event, method_handler, thread_pool)
- else:
- return None
+ return None
@enum.unique
class _ServerStage(enum.Enum):
- STOPPED = 'stopped'
- STARTED = 'started'
- GRACE = 'grace'
+ STOPPED = 'stopped'
+ STARTED = 'started'
+ GRACE = 'grace'
class _ServerState(object):
- def __init__(self, completion_queue, server, generic_handlers, thread_pool):
- self.lock = threading.Lock()
- self.completion_queue = completion_queue
- self.server = server
- self.generic_handlers = list(generic_handlers)
- self.thread_pool = thread_pool
- self.stage = _ServerStage.STOPPED
- self.shutdown_events = None
+ def __init__(self, completion_queue, server, generic_handlers, thread_pool):
+ self.lock = threading.Lock()
+ self.completion_queue = completion_queue
+ self.server = server
+ self.generic_handlers = list(generic_handlers)
+ self.thread_pool = thread_pool
+ self.stage = _ServerStage.STOPPED
+ self.shutdown_events = None
- # TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
- self.rpc_states = set()
- self.due = set()
+ # TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
+ self.rpc_states = set()
+ self.due = set()
def _add_generic_handlers(state, generic_handlers):
- with state.lock:
- state.generic_handlers.extend(generic_handlers)
+ with state.lock:
+ state.generic_handlers.extend(generic_handlers)
def _add_insecure_port(state, address):
- with state.lock:
- return state.server.add_http2_port(address)
+ with state.lock:
+ return state.server.add_http2_port(address)
def _add_secure_port(state, address, server_credentials):
- with state.lock:
- return state.server.add_http2_port(address, server_credentials._credentials)
+ with state.lock:
+ return state.server.add_http2_port(address,
+ server_credentials._credentials)
def _request_call(state):
- state.server.request_call(
- state.completion_queue, state.completion_queue, _REQUEST_CALL_TAG)
- state.due.add(_REQUEST_CALL_TAG)
+ state.server.request_call(state.completion_queue, state.completion_queue,
+ _REQUEST_CALL_TAG)
+ state.due.add(_REQUEST_CALL_TAG)
# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
def _stop_serving(state):
- if not state.rpc_states and not state.due:
- for shutdown_event in state.shutdown_events:
- shutdown_event.set()
- state.stage = _ServerStage.STOPPED
- return True
- else:
- return False
+ if not state.rpc_states and not state.due:
+ for shutdown_event in state.shutdown_events:
+ shutdown_event.set()
+ state.stage = _ServerStage.STOPPED
+ return True
+ else:
+ return False
def _serve(state):
- while True:
- event = state.completion_queue.poll()
- if event.tag is _SHUTDOWN_TAG:
- with state.lock:
- state.due.remove(_SHUTDOWN_TAG)
- if _stop_serving(state):
- return
- elif event.tag is _REQUEST_CALL_TAG:
- with state.lock:
- state.due.remove(_REQUEST_CALL_TAG)
- rpc_state = _handle_call(
- event, state.generic_handlers, state.thread_pool)
- if rpc_state is not None:
- state.rpc_states.add(rpc_state)
- if state.stage is _ServerStage.STARTED:
- _request_call(state)
- elif _stop_serving(state):
- return
- else:
- rpc_state, callbacks = event.tag(event)
- for callback in callbacks:
- callable_util.call_logging_exceptions(
- callback, 'Exception calling callback!')
- if rpc_state is not None:
- with state.lock:
- state.rpc_states.remove(rpc_state)
- if _stop_serving(state):
- return
+ while True:
+ event = state.completion_queue.poll()
+ if event.tag is _SHUTDOWN_TAG:
+ with state.lock:
+ state.due.remove(_SHUTDOWN_TAG)
+ if _stop_serving(state):
+ return
+ elif event.tag is _REQUEST_CALL_TAG:
+ with state.lock:
+ state.due.remove(_REQUEST_CALL_TAG)
+ rpc_state = _handle_call(event, state.generic_handlers,
+ state.thread_pool)
+ if rpc_state is not None:
+ state.rpc_states.add(rpc_state)
+ if state.stage is _ServerStage.STARTED:
+ _request_call(state)
+ elif _stop_serving(state):
+ return
+ else:
+ rpc_state, callbacks = event.tag(event)
+ for callback in callbacks:
+ callable_util.call_logging_exceptions(
+ callback, 'Exception calling callback!')
+ if rpc_state is not None:
+ with state.lock:
+ state.rpc_states.remove(rpc_state)
+ if _stop_serving(state):
+ return
def _stop(state, grace):
- with state.lock:
- if state.stage is _ServerStage.STOPPED:
- shutdown_event = threading.Event()
- shutdown_event.set()
- return shutdown_event
- else:
- if state.stage is _ServerStage.STARTED:
- state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
- state.stage = _ServerStage.GRACE
- state.shutdown_events = []
- state.due.add(_SHUTDOWN_TAG)
- shutdown_event = threading.Event()
- state.shutdown_events.append(shutdown_event)
- if grace is None:
- state.server.cancel_all_calls()
- # TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
- for rpc_state in state.rpc_states:
- with rpc_state.condition:
- rpc_state.client = _CANCELLED
- rpc_state.condition.notify_all()
- else:
- def cancel_all_calls_after_grace():
- shutdown_event.wait(timeout=grace)
- with state.lock:
- state.server.cancel_all_calls()
- # TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
- for rpc_state in state.rpc_states:
- with rpc_state.condition:
- rpc_state.client = _CANCELLED
- rpc_state.condition.notify_all()
- thread = threading.Thread(target=cancel_all_calls_after_grace)
- thread.start()
- return shutdown_event
- shutdown_event.wait()
- return shutdown_event
+ with state.lock:
+ if state.stage is _ServerStage.STOPPED:
+ shutdown_event = threading.Event()
+ shutdown_event.set()
+ return shutdown_event
+ else:
+ if state.stage is _ServerStage.STARTED:
+ state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
+ state.stage = _ServerStage.GRACE
+ state.shutdown_events = []
+ state.due.add(_SHUTDOWN_TAG)
+ shutdown_event = threading.Event()
+ state.shutdown_events.append(shutdown_event)
+ if grace is None:
+ state.server.cancel_all_calls()
+ # TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
+ for rpc_state in state.rpc_states:
+ with rpc_state.condition:
+ rpc_state.client = _CANCELLED
+ rpc_state.condition.notify_all()
+ else:
+
+ def cancel_all_calls_after_grace():
+ shutdown_event.wait(timeout=grace)
+ with state.lock:
+ state.server.cancel_all_calls()
+ # TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
+ for rpc_state in state.rpc_states:
+ with rpc_state.condition:
+ rpc_state.client = _CANCELLED
+ rpc_state.condition.notify_all()
+
+ thread = threading.Thread(target=cancel_all_calls_after_grace)
+ thread.start()
+ return shutdown_event
+ shutdown_event.wait()
+ return shutdown_event
def _start(state):
- with state.lock:
- if state.stage is not _ServerStage.STOPPED:
- raise ValueError('Cannot start already-started server!')
- state.server.start()
- state.stage = _ServerStage.STARTED
- _request_call(state)
- def cleanup_server(timeout):
- if timeout is None:
- _stop(state, _UNEXPECTED_EXIT_SERVER_GRACE).wait()
- else:
- _stop(state, timeout).wait()
-
- thread = _common.CleanupThread(
- cleanup_server, target=_serve, args=(state,))
- thread.start()
+ with state.lock:
+ if state.stage is not _ServerStage.STOPPED:
+ raise ValueError('Cannot start already-started server!')
+ state.server.start()
+ state.stage = _ServerStage.STARTED
+ _request_call(state)
+
+ def cleanup_server(timeout):
+ if timeout is None:
+ _stop(state, _UNEXPECTED_EXIT_SERVER_GRACE).wait()
+ else:
+ _stop(state, timeout).wait()
+
+ thread = _common.CleanupThread(
+ cleanup_server, target=_serve, args=(state,))
+ thread.start()
+
class Server(grpc.Server):
- def __init__(self, thread_pool, generic_handlers, options):
- completion_queue = cygrpc.CompletionQueue()
- server = cygrpc.Server(_common.channel_args(options))
- server.register_completion_queue(completion_queue)
- self._state = _ServerState(
- completion_queue, server, generic_handlers, thread_pool)
+ def __init__(self, thread_pool, generic_handlers, options):
+ completion_queue = cygrpc.CompletionQueue()
+ server = cygrpc.Server(_common.channel_args(options))
+ server.register_completion_queue(completion_queue)
+ self._state = _ServerState(completion_queue, server, generic_handlers,
+ thread_pool)
- def add_generic_rpc_handlers(self, generic_rpc_handlers):
- _add_generic_handlers(self._state, generic_rpc_handlers)
+ def add_generic_rpc_handlers(self, generic_rpc_handlers):
+ _add_generic_handlers(self._state, generic_rpc_handlers)
- def add_insecure_port(self, address):
- return _add_insecure_port(self._state, _common.encode(address))
+ def add_insecure_port(self, address):
+ return _add_insecure_port(self._state, _common.encode(address))
- def add_secure_port(self, address, server_credentials):
- return _add_secure_port(self._state, _common.encode(address), server_credentials)
+ def add_secure_port(self, address, server_credentials):
+ return _add_secure_port(self._state,
+ _common.encode(address), server_credentials)
- def start(self):
- _start(self._state)
+ def start(self):
+ _start(self._state)
- def stop(self, grace):
- return _stop(self._state, grace)
+ def stop(self, grace):
+ return _stop(self._state, grace)
- def __del__(self):
- _stop(self._state, None)
+ def __del__(self):
+ _stop(self._state, None)
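The _server.py hunk above only re-indents the implementation behind the public grpc.Server surface; its behavior is unchanged. For orientation, a minimal usage sketch of that surface follows (hedged: the thread-pool size, port, and the idea of registering generated handlers elsewhere are illustrative assumptions, not taken from this diff):

    from concurrent import futures
    import grpc

    # Assumed setup: service handlers are registered via generated
    # add_*_to_server helpers before start(); omitted here.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    server.add_insecure_port('[::]:50051')  # returns the bound port
    server.start()
    # ... serve until shutdown is requested ...
    server.stop(None)  # None grace cancels outstanding RPCs immediately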
diff --git a/src/python/grpcio/grpc/_utilities.py b/src/python/grpcio/grpc/_utilities.py
index a375896e6e..7c602eb37e 100644
--- a/src/python/grpcio/grpc/_utilities.py
+++ b/src/python/grpcio/grpc/_utilities.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Internal utilities for gRPC Python."""
import collections
@@ -44,132 +43,136 @@ _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = (
class RpcMethodHandler(
- collections.namedtuple(
- '_RpcMethodHandler',
- ('request_streaming', 'response_streaming', 'request_deserializer',
- 'response_serializer', 'unary_unary', 'unary_stream', 'stream_unary',
- 'stream_stream',)),
- grpc.RpcMethodHandler):
- pass
+ collections.namedtuple('_RpcMethodHandler', (
+ 'request_streaming',
+ 'response_streaming',
+ 'request_deserializer',
+ 'response_serializer',
+ 'unary_unary',
+ 'unary_stream',
+ 'stream_unary',
+ 'stream_stream',)), grpc.RpcMethodHandler):
+ pass
class DictionaryGenericHandler(grpc.ServiceRpcHandler):
- def __init__(self, service, method_handlers):
- self._name = service
- self._method_handlers = {
- _common.fully_qualified_method(service, method): method_handler
- for method, method_handler in six.iteritems(method_handlers)}
+ def __init__(self, service, method_handlers):
+ self._name = service
+ self._method_handlers = {
+ _common.fully_qualified_method(service, method): method_handler
+ for method, method_handler in six.iteritems(method_handlers)
+ }
- def service_name(self):
- return self._name
+ def service_name(self):
+ return self._name
- def service(self, handler_call_details):
- return self._method_handlers.get(handler_call_details.method)
+ def service(self, handler_call_details):
+ return self._method_handlers.get(handler_call_details.method)
class _ChannelReadyFuture(grpc.Future):
- def __init__(self, channel):
- self._condition = threading.Condition()
- self._channel = channel
-
- self._matured = False
- self._cancelled = False
- self._done_callbacks = []
-
- def _block(self, timeout):
- until = None if timeout is None else time.time() + timeout
- with self._condition:
- while True:
- if self._cancelled:
- raise grpc.FutureCancelledError()
- elif self._matured:
- return
- else:
- if until is None:
- self._condition.wait()
- else:
- remaining = until - time.time()
- if remaining < 0:
- raise grpc.FutureTimeoutError()
+ def __init__(self, channel):
+ self._condition = threading.Condition()
+ self._channel = channel
+
+ self._matured = False
+ self._cancelled = False
+ self._done_callbacks = []
+
+ def _block(self, timeout):
+ until = None if timeout is None else time.time() + timeout
+ with self._condition:
+ while True:
+ if self._cancelled:
+ raise grpc.FutureCancelledError()
+ elif self._matured:
+ return
+ else:
+ if until is None:
+ self._condition.wait()
+ else:
+ remaining = until - time.time()
+ if remaining < 0:
+ raise grpc.FutureTimeoutError()
+ else:
+ self._condition.wait(timeout=remaining)
+
+ def _update(self, connectivity):
+ with self._condition:
+ if (not self._cancelled and
+ connectivity is grpc.ChannelConnectivity.READY):
+ self._matured = True
+ self._channel.unsubscribe(self._update)
+ self._condition.notify_all()
+ done_callbacks = tuple(self._done_callbacks)
+ self._done_callbacks = None
+ else:
+ return
+
+ for done_callback in done_callbacks:
+ callable_util.call_logging_exceptions(
+ done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)
+
+ def cancel(self):
+ with self._condition:
+ if not self._matured:
+ self._cancelled = True
+ self._channel.unsubscribe(self._update)
+ self._condition.notify_all()
+ done_callbacks = tuple(self._done_callbacks)
+ self._done_callbacks = None
else:
- self._condition.wait(timeout=remaining)
-
- def _update(self, connectivity):
- with self._condition:
- if (not self._cancelled and
- connectivity is grpc.ChannelConnectivity.READY):
- self._matured = True
- self._channel.unsubscribe(self._update)
- self._condition.notify_all()
- done_callbacks = tuple(self._done_callbacks)
- self._done_callbacks = None
- else:
- return
-
- for done_callback in done_callbacks:
- callable_util.call_logging_exceptions(
- done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)
-
- def cancel(self):
- with self._condition:
- if not self._matured:
- self._cancelled = True
- self._channel.unsubscribe(self._update)
- self._condition.notify_all()
- done_callbacks = tuple(self._done_callbacks)
- self._done_callbacks = None
- else:
- return False
-
- for done_callback in done_callbacks:
- callable_util.call_logging_exceptions(
- done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)
-
- def cancelled(self):
- with self._condition:
- return self._cancelled
-
- def running(self):
- with self._condition:
- return not self._cancelled and not self._matured
-
- def done(self):
- with self._condition:
- return self._cancelled or self._matured
-
- def result(self, timeout=None):
- self._block(timeout)
- return None
-
- def exception(self, timeout=None):
- self._block(timeout)
- return None
-
- def traceback(self, timeout=None):
- self._block(timeout)
- return None
-
- def add_done_callback(self, fn):
- with self._condition:
- if not self._cancelled and not self._matured:
- self._done_callbacks.append(fn)
- return
-
- fn(self)
-
- def start(self):
- with self._condition:
- self._channel.subscribe(self._update, try_to_connect=True)
-
- def __del__(self):
- with self._condition:
- if not self._cancelled and not self._matured:
- self._channel.unsubscribe(self._update)
+ return False
+
+ for done_callback in done_callbacks:
+ callable_util.call_logging_exceptions(
+ done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)
+
+ def cancelled(self):
+ with self._condition:
+ return self._cancelled
+
+ def running(self):
+ with self._condition:
+ return not self._cancelled and not self._matured
+
+ def done(self):
+ with self._condition:
+ return self._cancelled or self._matured
+
+ def result(self, timeout=None):
+ self._block(timeout)
+ return None
+
+ def exception(self, timeout=None):
+ self._block(timeout)
+ return None
+
+ def traceback(self, timeout=None):
+ self._block(timeout)
+ return None
+
+ def add_done_callback(self, fn):
+ with self._condition:
+ if not self._cancelled and not self._matured:
+ self._done_callbacks.append(fn)
+ return
+
+ fn(self)
+
+ def start(self):
+ with self._condition:
+ self._channel.subscribe(self._update, try_to_connect=True)
+
+ def __del__(self):
+ with self._condition:
+ if not self._cancelled and not self._matured:
+ self._channel.unsubscribe(self._update)
def channel_ready_future(channel):
- ready_future = _ChannelReadyFuture(channel)
- ready_future.start()
- return ready_future
+ ready_future = _ChannelReadyFuture(channel)
+ ready_future.start()
+ return ready_future
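_ChannelReadyFuture, reformatted above, is what backs grpc.channel_ready_future: it subscribes to channel connectivity and matures once the channel reports READY. A small hedged sketch of the intended call pattern (the target address and timeout are illustrative only):

    import grpc

    channel = grpc.insecure_channel('localhost:50051')
    # Blocks until the channel is READY, or raises
    # grpc.FutureTimeoutError after the timeout elapses.
    grpc.channel_ready_future(channel).result(timeout=10)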
diff --git a/src/python/grpcio/grpc/beta/_client_adaptations.py b/src/python/grpcio/grpc/beta/_client_adaptations.py
index e4ee44d7a3..e5b28e9408 100644
--- a/src/python/grpcio/grpc/beta/_client_adaptations.py
+++ b/src/python/grpcio/grpc/beta/_client_adaptations.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Translates gRPC's client-side API into gRPC's client-side Beta API."""
import grpc
@@ -38,531 +37,654 @@ from grpc.framework.foundation import future
from grpc.framework.interfaces.face import face
_STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS = {
- grpc.StatusCode.CANCELLED: (
- face.Abortion.Kind.CANCELLED, face.CancellationError),
- grpc.StatusCode.UNKNOWN: (
- face.Abortion.Kind.REMOTE_FAILURE, face.RemoteError),
- grpc.StatusCode.DEADLINE_EXCEEDED: (
- face.Abortion.Kind.EXPIRED, face.ExpirationError),
- grpc.StatusCode.UNIMPLEMENTED: (
- face.Abortion.Kind.LOCAL_FAILURE, face.LocalError),
+ grpc.StatusCode.CANCELLED: (face.Abortion.Kind.CANCELLED,
+ face.CancellationError),
+ grpc.StatusCode.UNKNOWN: (face.Abortion.Kind.REMOTE_FAILURE,
+ face.RemoteError),
+ grpc.StatusCode.DEADLINE_EXCEEDED: (face.Abortion.Kind.EXPIRED,
+ face.ExpirationError),
+ grpc.StatusCode.UNIMPLEMENTED: (face.Abortion.Kind.LOCAL_FAILURE,
+ face.LocalError),
}
def _effective_metadata(metadata, metadata_transformer):
- non_none_metadata = () if metadata is None else metadata
- if metadata_transformer is None:
- return non_none_metadata
- else:
- return metadata_transformer(non_none_metadata)
+ non_none_metadata = () if metadata is None else metadata
+ if metadata_transformer is None:
+ return non_none_metadata
+ else:
+ return metadata_transformer(non_none_metadata)
def _credentials(grpc_call_options):
- return None if grpc_call_options is None else grpc_call_options.credentials
+ return None if grpc_call_options is None else grpc_call_options.credentials
def _abortion(rpc_error_call):
- code = rpc_error_call.code()
- pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
- error_kind = face.Abortion.Kind.LOCAL_FAILURE if pair is None else pair[0]
- return face.Abortion(
- error_kind, rpc_error_call.initial_metadata(),
- rpc_error_call.trailing_metadata(), code, rpc_error_call.details())
+ code = rpc_error_call.code()
+ pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
+ error_kind = face.Abortion.Kind.LOCAL_FAILURE if pair is None else pair[0]
+ return face.Abortion(error_kind,
+ rpc_error_call.initial_metadata(),
+ rpc_error_call.trailing_metadata(), code,
+ rpc_error_call.details())
def _abortion_error(rpc_error_call):
- code = rpc_error_call.code()
- pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
- exception_class = face.AbortionError if pair is None else pair[1]
- return exception_class(
- rpc_error_call.initial_metadata(), rpc_error_call.trailing_metadata(),
- code, rpc_error_call.details())
+ code = rpc_error_call.code()
+ pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
+ exception_class = face.AbortionError if pair is None else pair[1]
+ return exception_class(rpc_error_call.initial_metadata(),
+ rpc_error_call.trailing_metadata(), code,
+ rpc_error_call.details())
class _InvocationProtocolContext(interfaces.GRPCInvocationContext):
- def disable_next_request_compression(self):
- pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
+ def disable_next_request_compression(self):
+ pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
class _Rendezvous(future.Future, face.Call):
- def __init__(self, response_future, response_iterator, call):
- self._future = response_future
- self._iterator = response_iterator
- self._call = call
+ def __init__(self, response_future, response_iterator, call):
+ self._future = response_future
+ self._iterator = response_iterator
+ self._call = call
- def cancel(self):
- return self._call.cancel()
+ def cancel(self):
+ return self._call.cancel()
- def cancelled(self):
- return self._future.cancelled()
+ def cancelled(self):
+ return self._future.cancelled()
- def running(self):
- return self._future.running()
+ def running(self):
+ return self._future.running()
- def done(self):
- return self._future.done()
+ def done(self):
+ return self._future.done()
- def result(self, timeout=None):
- try:
- return self._future.result(timeout=timeout)
- except grpc.RpcError as rpc_error_call:
- raise _abortion_error(rpc_error_call)
- except grpc.FutureTimeoutError:
- raise future.TimeoutError()
- except grpc.FutureCancelledError:
- raise future.CancelledError()
+ def result(self, timeout=None):
+ try:
+ return self._future.result(timeout=timeout)
+ except grpc.RpcError as rpc_error_call:
+ raise _abortion_error(rpc_error_call)
+ except grpc.FutureTimeoutError:
+ raise future.TimeoutError()
+ except grpc.FutureCancelledError:
+ raise future.CancelledError()
- def exception(self, timeout=None):
- try:
- rpc_error_call = self._future.exception(timeout=timeout)
- if rpc_error_call is None:
- return None
- else:
- return _abortion_error(rpc_error_call)
- except grpc.FutureTimeoutError:
- raise future.TimeoutError()
- except grpc.FutureCancelledError:
- raise future.CancelledError()
-
- def traceback(self, timeout=None):
- try:
- return self._future.traceback(timeout=timeout)
- except grpc.FutureTimeoutError:
- raise future.TimeoutError()
- except grpc.FutureCancelledError:
- raise future.CancelledError()
+ def exception(self, timeout=None):
+ try:
+ rpc_error_call = self._future.exception(timeout=timeout)
+ if rpc_error_call is None:
+ return None
+ else:
+ return _abortion_error(rpc_error_call)
+ except grpc.FutureTimeoutError:
+ raise future.TimeoutError()
+ except grpc.FutureCancelledError:
+ raise future.CancelledError()
- def add_done_callback(self, fn):
- self._future.add_done_callback(lambda ignored_callback: fn(self))
+ def traceback(self, timeout=None):
+ try:
+ return self._future.traceback(timeout=timeout)
+ except grpc.FutureTimeoutError:
+ raise future.TimeoutError()
+ except grpc.FutureCancelledError:
+ raise future.CancelledError()
- def __iter__(self):
- return self
+ def add_done_callback(self, fn):
+ self._future.add_done_callback(lambda ignored_callback: fn(self))
- def _next(self):
- try:
- return next(self._iterator)
- except grpc.RpcError as rpc_error_call:
- raise _abortion_error(rpc_error_call)
+ def __iter__(self):
+ return self
+
+ def _next(self):
+ try:
+ return next(self._iterator)
+ except grpc.RpcError as rpc_error_call:
+ raise _abortion_error(rpc_error_call)
+
+ def __next__(self):
+ return self._next()
- def __next__(self):
- return self._next()
+ def next(self):
+ return self._next()
- def next(self):
- return self._next()
+ def is_active(self):
+ return self._call.is_active()
- def is_active(self):
- return self._call.is_active()
+ def time_remaining(self):
+ return self._call.time_remaining()
- def time_remaining(self):
- return self._call.time_remaining()
+ def add_abortion_callback(self, abortion_callback):
- def add_abortion_callback(self, abortion_callback):
- def done_callback():
- if self.code() is not grpc.StatusCode.OK:
- abortion_callback(_abortion(self._call))
- registered = self._call.add_callback(done_callback)
- return None if registered else done_callback()
+ def done_callback():
+ if self.code() is not grpc.StatusCode.OK:
+ abortion_callback(_abortion(self._call))
- def protocol_context(self):
- return _InvocationProtocolContext()
+ registered = self._call.add_callback(done_callback)
+ return None if registered else done_callback()
- def initial_metadata(self):
- return self._call.initial_metadata()
+ def protocol_context(self):
+ return _InvocationProtocolContext()
- def terminal_metadata(self):
- return self._call.terminal_metadata()
+ def initial_metadata(self):
+ return self._call.initial_metadata()
- def code(self):
- return self._call.code()
+ def terminal_metadata(self):
+ return self._call.terminal_metadata()
- def details(self):
- return self._call.details()
+ def code(self):
+ return self._call.code()
+ def details(self):
+ return self._call.details()
-def _blocking_unary_unary(
- channel, group, method, timeout, with_call, protocol_options, metadata,
- metadata_transformer, request, request_serializer, response_deserializer):
- try:
+
+def _blocking_unary_unary(channel, group, method, timeout, with_call,
+ protocol_options, metadata, metadata_transformer,
+ request, request_serializer, response_deserializer):
+ try:
+ multi_callable = channel.unary_unary(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ if with_call:
+ response, call = multi_callable.with_call(
+ request,
+ timeout=timeout,
+ metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ return response, _Rendezvous(None, None, call)
+ else:
+ return multi_callable(
+ request,
+ timeout=timeout,
+ metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ except grpc.RpcError as rpc_error_call:
+ raise _abortion_error(rpc_error_call)
+
+
+def _future_unary_unary(channel, group, method, timeout, protocol_options,
+ metadata, metadata_transformer, request,
+ request_serializer, response_deserializer):
multi_callable = channel.unary_unary(
_common.fully_qualified_method(group, method),
request_serializer=request_serializer,
response_deserializer=response_deserializer)
effective_metadata = _effective_metadata(metadata, metadata_transformer)
- if with_call:
- response, call = multi_callable.with_call(
- request, timeout=timeout, metadata=effective_metadata,
- credentials=_credentials(protocol_options))
- return response, _Rendezvous(None, None, call)
- else:
- return multi_callable(
- request, timeout=timeout, metadata=effective_metadata,
- credentials=_credentials(protocol_options))
- except grpc.RpcError as rpc_error_call:
- raise _abortion_error(rpc_error_call)
-
-
-def _future_unary_unary(
- channel, group, method, timeout, protocol_options, metadata,
- metadata_transformer, request, request_serializer, response_deserializer):
- multi_callable = channel.unary_unary(
- _common.fully_qualified_method(group, method),
- request_serializer=request_serializer,
- response_deserializer=response_deserializer)
- effective_metadata = _effective_metadata(metadata, metadata_transformer)
- response_future = multi_callable.future(
- request, timeout=timeout, metadata=effective_metadata,
- credentials=_credentials(protocol_options))
- return _Rendezvous(response_future, None, response_future)
-
-
-def _unary_stream(
- channel, group, method, timeout, protocol_options, metadata,
- metadata_transformer, request, request_serializer, response_deserializer):
- multi_callable = channel.unary_stream(
- _common.fully_qualified_method(group, method),
- request_serializer=request_serializer,
- response_deserializer=response_deserializer)
- effective_metadata = _effective_metadata(metadata, metadata_transformer)
- response_iterator = multi_callable(
- request, timeout=timeout, metadata=effective_metadata,
- credentials=_credentials(protocol_options))
- return _Rendezvous(None, response_iterator, response_iterator)
-
-
-def _blocking_stream_unary(
- channel, group, method, timeout, with_call, protocol_options, metadata,
- metadata_transformer, request_iterator, request_serializer,
- response_deserializer):
- try:
+ response_future = multi_callable.future(
+ request,
+ timeout=timeout,
+ metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ return _Rendezvous(response_future, None, response_future)
+
+
+def _unary_stream(channel, group, method, timeout, protocol_options, metadata,
+ metadata_transformer, request, request_serializer,
+ response_deserializer):
+ multi_callable = channel.unary_stream(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ response_iterator = multi_callable(
+ request,
+ timeout=timeout,
+ metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ return _Rendezvous(None, response_iterator, response_iterator)
+
+
+def _blocking_stream_unary(channel, group, method, timeout, with_call,
+ protocol_options, metadata, metadata_transformer,
+ request_iterator, request_serializer,
+ response_deserializer):
+ try:
+ multi_callable = channel.stream_unary(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ if with_call:
+ response, call = multi_callable.with_call(
+ request_iterator,
+ timeout=timeout,
+ metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ return response, _Rendezvous(None, None, call)
+ else:
+ return multi_callable(
+ request_iterator,
+ timeout=timeout,
+ metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ except grpc.RpcError as rpc_error_call:
+ raise _abortion_error(rpc_error_call)
+
+
+def _future_stream_unary(channel, group, method, timeout, protocol_options,
+ metadata, metadata_transformer, request_iterator,
+ request_serializer, response_deserializer):
multi_callable = channel.stream_unary(
_common.fully_qualified_method(group, method),
request_serializer=request_serializer,
response_deserializer=response_deserializer)
effective_metadata = _effective_metadata(metadata, metadata_transformer)
- if with_call:
- response, call = multi_callable.with_call(
- request_iterator, timeout=timeout, metadata=effective_metadata,
- credentials=_credentials(protocol_options))
- return response, _Rendezvous(None, None, call)
- else:
- return multi_callable(
- request_iterator, timeout=timeout, metadata=effective_metadata,
- credentials=_credentials(protocol_options))
- except grpc.RpcError as rpc_error_call:
- raise _abortion_error(rpc_error_call)
-
-
-def _future_stream_unary(
- channel, group, method, timeout, protocol_options, metadata,
- metadata_transformer, request_iterator, request_serializer,
- response_deserializer):
- multi_callable = channel.stream_unary(
- _common.fully_qualified_method(group, method),
- request_serializer=request_serializer,
- response_deserializer=response_deserializer)
- effective_metadata = _effective_metadata(metadata, metadata_transformer)
- response_future = multi_callable.future(
- request_iterator, timeout=timeout, metadata=effective_metadata,
- credentials=_credentials(protocol_options))
- return _Rendezvous(response_future, None, response_future)
-
-
-def _stream_stream(
- channel, group, method, timeout, protocol_options, metadata,
- metadata_transformer, request_iterator, request_serializer,
- response_deserializer):
- multi_callable = channel.stream_stream(
- _common.fully_qualified_method(group, method),
- request_serializer=request_serializer,
- response_deserializer=response_deserializer)
- effective_metadata = _effective_metadata(metadata, metadata_transformer)
- response_iterator = multi_callable(
- request_iterator, timeout=timeout, metadata=effective_metadata,
- credentials=_credentials(protocol_options))
- return _Rendezvous(None, response_iterator, response_iterator)
+ response_future = multi_callable.future(
+ request_iterator,
+ timeout=timeout,
+ metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ return _Rendezvous(response_future, None, response_future)
+
+
+def _stream_stream(channel, group, method, timeout, protocol_options, metadata,
+ metadata_transformer, request_iterator, request_serializer,
+ response_deserializer):
+ multi_callable = channel.stream_stream(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ response_iterator = multi_callable(
+ request_iterator,
+ timeout=timeout,
+ metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ return _Rendezvous(None, response_iterator, response_iterator)
class _UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable):
- def __init__(
- self, channel, group, method, metadata_transformer, request_serializer,
- response_deserializer):
- self._channel = channel
- self._group = group
- self._method = method
- self._metadata_transformer = metadata_transformer
- self._request_serializer = request_serializer
- self._response_deserializer = response_deserializer
-
- def __call__(
- self, request, timeout, metadata=None, with_call=False,
- protocol_options=None):
- return _blocking_unary_unary(
- self._channel, self._group, self._method, timeout, with_call,
- protocol_options, metadata, self._metadata_transformer, request,
- self._request_serializer, self._response_deserializer)
-
- def future(self, request, timeout, metadata=None, protocol_options=None):
- return _future_unary_unary(
- self._channel, self._group, self._method, timeout, protocol_options,
- metadata, self._metadata_transformer, request, self._request_serializer,
- self._response_deserializer)
-
- def event(
- self, request, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- raise NotImplementedError()
+ def __init__(self, channel, group, method, metadata_transformer,
+ request_serializer, response_deserializer):
+ self._channel = channel
+ self._group = group
+ self._method = method
+ self._metadata_transformer = metadata_transformer
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def __call__(self,
+ request,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ return _blocking_unary_unary(
+ self._channel, self._group, self._method, timeout, with_call,
+ protocol_options, metadata, self._metadata_transformer, request,
+ self._request_serializer, self._response_deserializer)
+
+ def future(self, request, timeout, metadata=None, protocol_options=None):
+ return _future_unary_unary(
+ self._channel, self._group, self._method, timeout, protocol_options,
+ metadata, self._metadata_transformer, request,
+ self._request_serializer, self._response_deserializer)
+
+ def event(self,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ raise NotImplementedError()
class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable):
- def __init__(
- self, channel, group, method, metadata_transformer, request_serializer,
- response_deserializer):
- self._channel = channel
- self._group = group
- self._method = method
- self._metadata_transformer = metadata_transformer
- self._request_serializer = request_serializer
- self._response_deserializer = response_deserializer
-
- def __call__(self, request, timeout, metadata=None, protocol_options=None):
- return _unary_stream(
- self._channel, self._group, self._method, timeout, protocol_options,
- metadata, self._metadata_transformer, request, self._request_serializer,
- self._response_deserializer)
-
- def event(
- self, request, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- raise NotImplementedError()
+ def __init__(self, channel, group, method, metadata_transformer,
+ request_serializer, response_deserializer):
+ self._channel = channel
+ self._group = group
+ self._method = method
+ self._metadata_transformer = metadata_transformer
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def __call__(self, request, timeout, metadata=None, protocol_options=None):
+ return _unary_stream(
+ self._channel, self._group, self._method, timeout, protocol_options,
+ metadata, self._metadata_transformer, request,
+ self._request_serializer, self._response_deserializer)
+
+ def event(self,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ raise NotImplementedError()
class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable):
- def __init__(
- self, channel, group, method, metadata_transformer, request_serializer,
- response_deserializer):
- self._channel = channel
- self._group = group
- self._method = method
- self._metadata_transformer = metadata_transformer
- self._request_serializer = request_serializer
- self._response_deserializer = response_deserializer
-
- def __call__(
- self, request_iterator, timeout, metadata=None, with_call=False,
- protocol_options=None):
- return _blocking_stream_unary(
- self._channel, self._group, self._method, timeout, with_call,
- protocol_options, metadata, self._metadata_transformer,
- request_iterator, self._request_serializer, self._response_deserializer)
-
- def future(
- self, request_iterator, timeout, metadata=None, protocol_options=None):
- return _future_stream_unary(
- self._channel, self._group, self._method, timeout, protocol_options,
- metadata, self._metadata_transformer, request_iterator,
- self._request_serializer, self._response_deserializer)
-
- def event(
- self, receiver, abortion_callback, timeout, metadata=None,
- protocol_options=None):
- raise NotImplementedError()
+ def __init__(self, channel, group, method, metadata_transformer,
+ request_serializer, response_deserializer):
+ self._channel = channel
+ self._group = group
+ self._method = method
+ self._metadata_transformer = metadata_transformer
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def __call__(self,
+ request_iterator,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ return _blocking_stream_unary(
+ self._channel, self._group, self._method, timeout, with_call,
+ protocol_options, metadata, self._metadata_transformer,
+ request_iterator, self._request_serializer,
+ self._response_deserializer)
+
+ def future(self,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ return _future_stream_unary(
+ self._channel, self._group, self._method, timeout, protocol_options,
+ metadata, self._metadata_transformer, request_iterator,
+ self._request_serializer, self._response_deserializer)
+
+ def event(self,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ raise NotImplementedError()
class _StreamStreamMultiCallable(face.StreamStreamMultiCallable):
- def __init__(
- self, channel, group, method, metadata_transformer, request_serializer,
- response_deserializer):
- self._channel = channel
- self._group = group
- self._method = method
- self._metadata_transformer = metadata_transformer
- self._request_serializer = request_serializer
- self._response_deserializer = response_deserializer
-
- def __call__(
- self, request_iterator, timeout, metadata=None, protocol_options=None):
- return _stream_stream(
- self._channel, self._group, self._method, timeout, protocol_options,
- metadata, self._metadata_transformer, request_iterator,
- self._request_serializer, self._response_deserializer)
-
- def event(
- self, receiver, abortion_callback, timeout, metadata=None,
- protocol_options=None):
- raise NotImplementedError()
+ def __init__(self, channel, group, method, metadata_transformer,
+ request_serializer, response_deserializer):
+ self._channel = channel
+ self._group = group
+ self._method = method
+ self._metadata_transformer = metadata_transformer
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def __call__(self,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ return _stream_stream(
+ self._channel, self._group, self._method, timeout, protocol_options,
+ metadata, self._metadata_transformer, request_iterator,
+ self._request_serializer, self._response_deserializer)
+
+ def event(self,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ raise NotImplementedError()
class _GenericStub(face.GenericStub):
- def __init__(
- self, channel, metadata_transformer, request_serializers,
- response_deserializers):
- self._channel = channel
- self._metadata_transformer = metadata_transformer
- self._request_serializers = request_serializers or {}
- self._response_deserializers = response_deserializers or {}
-
- def blocking_unary_unary(
- self, group, method, request, timeout, metadata=None,
- with_call=None, protocol_options=None):
- request_serializer = self._request_serializers.get((group, method,))
- response_deserializer = self._response_deserializers.get((group, method,))
- return _blocking_unary_unary(
- self._channel, group, method, timeout, with_call, protocol_options,
- metadata, self._metadata_transformer, request, request_serializer,
- response_deserializer)
-
- def future_unary_unary(
- self, group, method, request, timeout, metadata=None,
- protocol_options=None):
- request_serializer = self._request_serializers.get((group, method,))
- response_deserializer = self._response_deserializers.get((group, method,))
- return _future_unary_unary(
- self._channel, group, method, timeout, protocol_options, metadata,
- self._metadata_transformer, request, request_serializer,
- response_deserializer)
-
- def inline_unary_stream(
- self, group, method, request, timeout, metadata=None,
- protocol_options=None):
- request_serializer = self._request_serializers.get((group, method,))
- response_deserializer = self._response_deserializers.get((group, method,))
- return _unary_stream(
- self._channel, group, method, timeout, protocol_options, metadata,
- self._metadata_transformer, request, request_serializer,
- response_deserializer)
-
- def blocking_stream_unary(
- self, group, method, request_iterator, timeout, metadata=None,
- with_call=None, protocol_options=None):
- request_serializer = self._request_serializers.get((group, method,))
- response_deserializer = self._response_deserializers.get((group, method,))
- return _blocking_stream_unary(
- self._channel, group, method, timeout, with_call, protocol_options,
- metadata, self._metadata_transformer, request_iterator,
- request_serializer, response_deserializer)
-
- def future_stream_unary(
- self, group, method, request_iterator, timeout, metadata=None,
- protocol_options=None):
- request_serializer = self._request_serializers.get((group, method,))
- response_deserializer = self._response_deserializers.get((group, method,))
- return _future_stream_unary(
- self._channel, group, method, timeout, protocol_options, metadata,
- self._metadata_transformer, request_iterator, request_serializer,
- response_deserializer)
-
- def inline_stream_stream(
- self, group, method, request_iterator, timeout, metadata=None,
- protocol_options=None):
- request_serializer = self._request_serializers.get((group, method,))
- response_deserializer = self._response_deserializers.get((group, method,))
- return _stream_stream(
- self._channel, group, method, timeout, protocol_options, metadata,
- self._metadata_transformer, request_iterator, request_serializer,
- response_deserializer)
-
- def event_unary_unary(
- self, group, method, request, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- raise NotImplementedError()
-
- def event_unary_stream(
- self, group, method, request, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- raise NotImplementedError()
-
- def event_stream_unary(
- self, group, method, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- raise NotImplementedError()
-
- def event_stream_stream(
- self, group, method, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- raise NotImplementedError()
-
- def unary_unary(self, group, method):
- request_serializer = self._request_serializers.get((group, method,))
- response_deserializer = self._response_deserializers.get((group, method,))
- return _UnaryUnaryMultiCallable(
- self._channel, group, method, self._metadata_transformer,
- request_serializer, response_deserializer)
-
- def unary_stream(self, group, method):
- request_serializer = self._request_serializers.get((group, method,))
- response_deserializer = self._response_deserializers.get((group, method,))
- return _UnaryStreamMultiCallable(
- self._channel, group, method, self._metadata_transformer,
- request_serializer, response_deserializer)
-
- def stream_unary(self, group, method):
- request_serializer = self._request_serializers.get((group, method,))
- response_deserializer = self._response_deserializers.get((group, method,))
- return _StreamUnaryMultiCallable(
- self._channel, group, method, self._metadata_transformer,
- request_serializer, response_deserializer)
-
- def stream_stream(self, group, method):
- request_serializer = self._request_serializers.get((group, method,))
- response_deserializer = self._response_deserializers.get((group, method,))
- return _StreamStreamMultiCallable(
- self._channel, group, method, self._metadata_transformer,
- request_serializer, response_deserializer)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- return False
+ def __init__(self, channel, metadata_transformer, request_serializers,
+ response_deserializers):
+ self._channel = channel
+ self._metadata_transformer = metadata_transformer
+ self._request_serializers = request_serializers or {}
+ self._response_deserializers = response_deserializers or {}
+
+ def blocking_unary_unary(self,
+ group,
+ method,
+ request,
+ timeout,
+ metadata=None,
+ with_call=None,
+ protocol_options=None):
+ request_serializer = self._request_serializers.get((
+ group,
+ method,))
+ response_deserializer = self._response_deserializers.get((
+ group,
+ method,))
+ return _blocking_unary_unary(self._channel, group, method, timeout,
+ with_call, protocol_options, metadata,
+ self._metadata_transformer, request,
+ request_serializer, response_deserializer)
+
+ def future_unary_unary(self,
+ group,
+ method,
+ request,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ request_serializer = self._request_serializers.get((
+ group,
+ method,))
+ response_deserializer = self._response_deserializers.get((
+ group,
+ method,))
+ return _future_unary_unary(self._channel, group, method, timeout,
+ protocol_options, metadata,
+ self._metadata_transformer, request,
+ request_serializer, response_deserializer)
+
+ def inline_unary_stream(self,
+ group,
+ method,
+ request,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ request_serializer = self._request_serializers.get((
+ group,
+ method,))
+ response_deserializer = self._response_deserializers.get((
+ group,
+ method,))
+ return _unary_stream(self._channel, group, method, timeout,
+ protocol_options, metadata,
+ self._metadata_transformer, request,
+ request_serializer, response_deserializer)
+
+ def blocking_stream_unary(self,
+ group,
+ method,
+ request_iterator,
+ timeout,
+ metadata=None,
+ with_call=None,
+ protocol_options=None):
+ request_serializer = self._request_serializers.get((
+ group,
+ method,))
+ response_deserializer = self._response_deserializers.get((
+ group,
+ method,))
+ return _blocking_stream_unary(
+ self._channel, group, method, timeout, with_call, protocol_options,
+ metadata, self._metadata_transformer, request_iterator,
+ request_serializer, response_deserializer)
+
+ def future_stream_unary(self,
+ group,
+ method,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ request_serializer = self._request_serializers.get((
+ group,
+ method,))
+ response_deserializer = self._response_deserializers.get((
+ group,
+ method,))
+ return _future_stream_unary(
+ self._channel, group, method, timeout, protocol_options, metadata,
+ self._metadata_transformer, request_iterator, request_serializer,
+ response_deserializer)
+
+ def inline_stream_stream(self,
+ group,
+ method,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ request_serializer = self._request_serializers.get((
+ group,
+ method,))
+ response_deserializer = self._response_deserializers.get((
+ group,
+ method,))
+ return _stream_stream(self._channel, group, method, timeout,
+ protocol_options, metadata,
+ self._metadata_transformer, request_iterator,
+ request_serializer, response_deserializer)
+
+ def event_unary_unary(self,
+ group,
+ method,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ raise NotImplementedError()
+
+ def event_unary_stream(self,
+ group,
+ method,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ raise NotImplementedError()
+
+ def event_stream_unary(self,
+ group,
+ method,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ raise NotImplementedError()
+
+ def event_stream_stream(self,
+ group,
+ method,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ raise NotImplementedError()
+
+ def unary_unary(self, group, method):
+ request_serializer = self._request_serializers.get((
+ group,
+ method,))
+ response_deserializer = self._response_deserializers.get((
+ group,
+ method,))
+ return _UnaryUnaryMultiCallable(
+ self._channel, group, method, self._metadata_transformer,
+ request_serializer, response_deserializer)
+
+ def unary_stream(self, group, method):
+ request_serializer = self._request_serializers.get((
+ group,
+ method,))
+ response_deserializer = self._response_deserializers.get((
+ group,
+ method,))
+ return _UnaryStreamMultiCallable(
+ self._channel, group, method, self._metadata_transformer,
+ request_serializer, response_deserializer)
+
+ def stream_unary(self, group, method):
+ request_serializer = self._request_serializers.get((
+ group,
+ method,))
+ response_deserializer = self._response_deserializers.get((
+ group,
+ method,))
+ return _StreamUnaryMultiCallable(
+ self._channel, group, method, self._metadata_transformer,
+ request_serializer, response_deserializer)
+
+ def stream_stream(self, group, method):
+ request_serializer = self._request_serializers.get((
+ group,
+ method,))
+ response_deserializer = self._response_deserializers.get((
+ group,
+ method,))
+ return _StreamStreamMultiCallable(
+ self._channel, group, method, self._metadata_transformer,
+ request_serializer, response_deserializer)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ return False
class _DynamicStub(face.DynamicStub):
- def __init__(self, generic_stub, group, cardinalities):
- self._generic_stub = generic_stub
- self._group = group
- self._cardinalities = cardinalities
-
- def __getattr__(self, attr):
- method_cardinality = self._cardinalities.get(attr)
- if method_cardinality is cardinality.Cardinality.UNARY_UNARY:
- return self._generic_stub.unary_unary(self._group, attr)
- elif method_cardinality is cardinality.Cardinality.UNARY_STREAM:
- return self._generic_stub.unary_stream(self._group, attr)
- elif method_cardinality is cardinality.Cardinality.STREAM_UNARY:
- return self._generic_stub.stream_unary(self._group, attr)
- elif method_cardinality is cardinality.Cardinality.STREAM_STREAM:
- return self._generic_stub.stream_stream(self._group, attr)
- else:
- raise AttributeError('_DynamicStub object has no attribute "%s"!' % attr)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- return False
-
-
-def generic_stub(
- channel, host, metadata_transformer, request_serializers,
- response_deserializers):
- return _GenericStub(
- channel, metadata_transformer, request_serializers,
- response_deserializers)
-
-
-def dynamic_stub(
- channel, service, cardinalities, host, metadata_transformer,
- request_serializers, response_deserializers):
- return _DynamicStub(
- _GenericStub(
- channel, metadata_transformer, request_serializers,
- response_deserializers),
- service, cardinalities)
+ def __init__(self, generic_stub, group, cardinalities):
+ self._generic_stub = generic_stub
+ self._group = group
+ self._cardinalities = cardinalities
+
+ def __getattr__(self, attr):
+ method_cardinality = self._cardinalities.get(attr)
+ if method_cardinality is cardinality.Cardinality.UNARY_UNARY:
+ return self._generic_stub.unary_unary(self._group, attr)
+ elif method_cardinality is cardinality.Cardinality.UNARY_STREAM:
+ return self._generic_stub.unary_stream(self._group, attr)
+ elif method_cardinality is cardinality.Cardinality.STREAM_UNARY:
+ return self._generic_stub.stream_unary(self._group, attr)
+ elif method_cardinality is cardinality.Cardinality.STREAM_STREAM:
+ return self._generic_stub.stream_stream(self._group, attr)
+ else:
+ raise AttributeError('_DynamicStub object has no attribute "%s"!' %
+ attr)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ return False
+
+
+def generic_stub(channel, host, metadata_transformer, request_serializers,
+ response_deserializers):
+ return _GenericStub(channel, metadata_transformer, request_serializers,
+ response_deserializers)
+
+
+def dynamic_stub(channel, service, cardinalities, host, metadata_transformer,
+ request_serializers, response_deserializers):
+ return _DynamicStub(
+ _GenericStub(channel, metadata_transformer, request_serializers,
+ response_deserializers), service, cardinalities)
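
For orientation while reading the reformatted call paths above: a minimal sketch of how these multi-callables are reached from application code through the beta generic stub. The address, the service/method names and the pre-serialized request bytes are placeholders for illustration, not taken from this change.

from grpc.beta import implementations

channel = implementations.insecure_channel('localhost', 50051)  # placeholder address
stub = implementations.generic_stub(channel)
request = b'\x08\x01'  # placeholder: pre-serialized bytes, since no request_serializers are configured

# Blocking path: _GenericStub.blocking_unary_unary dispatches into _blocking_unary_unary.
response = stub.blocking_unary_unary('example.Echo', 'UnaryEcho', request, 10)

# Future path: _GenericStub.future_unary_unary dispatches into _future_unary_unary.
response_future = stub.future_unary_unary('example.Echo', 'UnaryEcho', request, 10)
response = response_future.result()

# Per-method multi-callable (the _UnaryUnaryMultiCallable reformatted above).
unary_echo = stub.unary_unary('example.Echo', 'UnaryEcho')
response = unary_echo(request, 10)
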
diff --git a/src/python/grpcio/grpc/beta/_connectivity_channel.py b/src/python/grpcio/grpc/beta/_connectivity_channel.py
index 61674a70ad..39020d2b4e 100644
--- a/src/python/grpcio/grpc/beta/_connectivity_channel.py
+++ b/src/python/grpcio/grpc/beta/_connectivity_channel.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Affords a connectivity-state-listenable channel."""
import threading
@@ -41,116 +40,122 @@ _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
'Exception calling channel subscription callback!')
_LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
- state: connectivity for state, connectivity in zip(
- _types.ConnectivityState, interfaces.ChannelConnectivity)
+ state: connectivity
+ for state, connectivity in zip(_types.ConnectivityState,
+ interfaces.ChannelConnectivity)
}
class ConnectivityChannel(object):
- def __init__(self, low_channel):
- self._lock = threading.Lock()
- self._low_channel = low_channel
-
- self._polling = False
- self._connectivity = None
- self._try_to_connect = False
- self._callbacks_and_connectivities = []
- self._delivering = False
-
- def _deliveries(self, connectivity):
- callbacks_needing_update = []
- for callback_and_connectivity in self._callbacks_and_connectivities:
- callback, callback_connectivity = callback_and_connectivity
- if callback_connectivity is not connectivity:
- callbacks_needing_update.append(callback)
- callback_and_connectivity[1] = connectivity
- return callbacks_needing_update
-
- def _deliver(self, initial_connectivity, initial_callbacks):
- connectivity = initial_connectivity
- callbacks = initial_callbacks
- while True:
- for callback in callbacks:
- callable_util.call_logging_exceptions(
- callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
- connectivity)
- with self._lock:
- callbacks = self._deliveries(self._connectivity)
- if callbacks:
- connectivity = self._connectivity
- else:
- self._delivering = False
- return
-
- def _spawn_delivery(self, connectivity, callbacks):
- delivering_thread = threading.Thread(
- target=self._deliver, args=(connectivity, callbacks,))
- delivering_thread.start()
- self._delivering = True
+ def __init__(self, low_channel):
+ self._lock = threading.Lock()
+ self._low_channel = low_channel
- # TODO(issue 3064): Don't poll.
- def _poll_connectivity(self, low_channel, initial_try_to_connect):
- try_to_connect = initial_try_to_connect
- low_connectivity = low_channel.check_connectivity_state(try_to_connect)
- with self._lock:
- self._connectivity = _LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
- low_connectivity]
- callbacks = tuple(
- callback for callback, unused_but_known_to_be_none_connectivity
- in self._callbacks_and_connectivities)
- for callback_and_connectivity in self._callbacks_and_connectivities:
- callback_and_connectivity[1] = self._connectivity
- if callbacks:
- self._spawn_delivery(self._connectivity, callbacks)
- completion_queue = _low.CompletionQueue()
- while True:
- low_channel.watch_connectivity_state(
- low_connectivity, time.time() + 0.2, completion_queue, None)
- event = completion_queue.next()
- with self._lock:
- if not self._callbacks_and_connectivities and not self._try_to_connect:
- self._polling = False
- self._connectivity = None
- completion_queue.shutdown()
- break
- try_to_connect = self._try_to_connect
+ self._polling = False
+ self._connectivity = None
self._try_to_connect = False
- if event.success or try_to_connect:
+ self._callbacks_and_connectivities = []
+ self._delivering = False
+
+ def _deliveries(self, connectivity):
+ callbacks_needing_update = []
+ for callback_and_connectivity in self._callbacks_and_connectivities:
+ callback, callback_connectivity = callback_and_connectivity
+ if callback_connectivity is not connectivity:
+ callbacks_needing_update.append(callback)
+ callback_and_connectivity[1] = connectivity
+ return callbacks_needing_update
+
+ def _deliver(self, initial_connectivity, initial_callbacks):
+ connectivity = initial_connectivity
+ callbacks = initial_callbacks
+ while True:
+ for callback in callbacks:
+ callable_util.call_logging_exceptions(
+ callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
+ connectivity)
+ with self._lock:
+ callbacks = self._deliveries(self._connectivity)
+ if callbacks:
+ connectivity = self._connectivity
+ else:
+ self._delivering = False
+ return
+
+ def _spawn_delivery(self, connectivity, callbacks):
+ delivering_thread = threading.Thread(
+ target=self._deliver, args=(
+ connectivity,
+ callbacks,))
+ delivering_thread.start()
+ self._delivering = True
+
+ # TODO(issue 3064): Don't poll.
+ def _poll_connectivity(self, low_channel, initial_try_to_connect):
+ try_to_connect = initial_try_to_connect
low_connectivity = low_channel.check_connectivity_state(try_to_connect)
with self._lock:
- self._connectivity = _LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
- low_connectivity]
- if not self._delivering:
- callbacks = self._deliveries(self._connectivity)
+ self._connectivity = _LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
+ low_connectivity]
+ callbacks = tuple(
+ callback
+ for callback, unused_but_known_to_be_none_connectivity in
+ self._callbacks_and_connectivities)
+ for callback_and_connectivity in self._callbacks_and_connectivities:
+ callback_and_connectivity[1] = self._connectivity
if callbacks:
- self._spawn_delivery(self._connectivity, callbacks)
-
- def subscribe(self, callback, try_to_connect):
- with self._lock:
- if not self._callbacks_and_connectivities and not self._polling:
- polling_thread = threading.Thread(
- target=self._poll_connectivity,
- args=(self._low_channel, bool(try_to_connect)))
- polling_thread.start()
- self._polling = True
- self._callbacks_and_connectivities.append([callback, None])
- elif not self._delivering and self._connectivity is not None:
- self._spawn_delivery(self._connectivity, (callback,))
- self._try_to_connect |= bool(try_to_connect)
- self._callbacks_and_connectivities.append(
- [callback, self._connectivity])
- else:
- self._try_to_connect |= bool(try_to_connect)
- self._callbacks_and_connectivities.append([callback, None])
-
- def unsubscribe(self, callback):
- with self._lock:
- for index, (subscribed_callback, unused_connectivity) in enumerate(
- self._callbacks_and_connectivities):
- if callback == subscribed_callback:
- self._callbacks_and_connectivities.pop(index)
- break
-
- def low_channel(self):
- return self._low_channel
+ self._spawn_delivery(self._connectivity, callbacks)
+ completion_queue = _low.CompletionQueue()
+ while True:
+ low_channel.watch_connectivity_state(low_connectivity,
+ time.time() + 0.2,
+ completion_queue, None)
+ event = completion_queue.next()
+ with self._lock:
+ if not self._callbacks_and_connectivities and not self._try_to_connect:
+ self._polling = False
+ self._connectivity = None
+ completion_queue.shutdown()
+ break
+ try_to_connect = self._try_to_connect
+ self._try_to_connect = False
+ if event.success or try_to_connect:
+ low_connectivity = low_channel.check_connectivity_state(
+ try_to_connect)
+ with self._lock:
+ self._connectivity = _LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
+ low_connectivity]
+ if not self._delivering:
+ callbacks = self._deliveries(self._connectivity)
+ if callbacks:
+ self._spawn_delivery(self._connectivity, callbacks)
+
+ def subscribe(self, callback, try_to_connect):
+ with self._lock:
+ if not self._callbacks_and_connectivities and not self._polling:
+ polling_thread = threading.Thread(
+ target=self._poll_connectivity,
+ args=(self._low_channel, bool(try_to_connect)))
+ polling_thread.start()
+ self._polling = True
+ self._callbacks_and_connectivities.append([callback, None])
+ elif not self._delivering and self._connectivity is not None:
+ self._spawn_delivery(self._connectivity, (callback,))
+ self._try_to_connect |= bool(try_to_connect)
+ self._callbacks_and_connectivities.append(
+ [callback, self._connectivity])
+ else:
+ self._try_to_connect |= bool(try_to_connect)
+ self._callbacks_and_connectivities.append([callback, None])
+
+ def unsubscribe(self, callback):
+ with self._lock:
+ for index, (subscribed_callback, unused_connectivity
+ ) in enumerate(self._callbacks_and_connectivities):
+ if callback == subscribed_callback:
+ self._callbacks_and_connectivities.pop(index)
+ break
+
+ def low_channel(self):
+ return self._low_channel
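
The ConnectivityChannel reformatted above implements connectivity polling for the beta stack; the application-facing way to observe connectivity on a beta channel is the subscribe/unsubscribe pair defined in implementations.py below. A minimal sketch, with the address and callback purely illustrative (how this particular class is wired underneath is not shown in this change):

from grpc.beta import implementations

def on_connectivity(connectivity):
    # Receives interfaces.ChannelConnectivity values as the channel's state changes.
    print('channel connectivity is now', connectivity)

channel = implementations.insecure_channel('localhost', 50051)  # placeholder address
channel.subscribe(on_connectivity, try_to_connect=True)
# ... issue RPCs ...
channel.unsubscribe(on_connectivity)
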
diff --git a/src/python/grpcio/grpc/beta/_server_adaptations.py b/src/python/grpcio/grpc/beta/_server_adaptations.py
index cca4a1797a..bb7c0960d5 100644
--- a/src/python/grpcio/grpc/beta/_server_adaptations.py
+++ b/src/python/grpcio/grpc/beta/_server_adaptations.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Translates gRPC's server-side API into gRPC's server-side Beta API."""
import collections
@@ -47,329 +46,352 @@ _DEFAULT_POOL_SIZE = 8
class _ServerProtocolContext(interfaces.GRPCServicerContext):
- def __init__(self, servicer_context):
- self._servicer_context = servicer_context
+ def __init__(self, servicer_context):
+ self._servicer_context = servicer_context
- def peer(self):
- return self._servicer_context.peer()
+ def peer(self):
+ return self._servicer_context.peer()
- def disable_next_response_compression(self):
- pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
+ def disable_next_response_compression(self):
+ pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
class _FaceServicerContext(face.ServicerContext):
- def __init__(self, servicer_context):
- self._servicer_context = servicer_context
+ def __init__(self, servicer_context):
+ self._servicer_context = servicer_context
- def is_active(self):
- return self._servicer_context.is_active()
+ def is_active(self):
+ return self._servicer_context.is_active()
- def time_remaining(self):
- return self._servicer_context.time_remaining()
+ def time_remaining(self):
+ return self._servicer_context.time_remaining()
- def add_abortion_callback(self, abortion_callback):
- raise NotImplementedError(
- 'add_abortion_callback no longer supported server-side!')
+ def add_abortion_callback(self, abortion_callback):
+ raise NotImplementedError(
+ 'add_abortion_callback no longer supported server-side!')
- def cancel(self):
- self._servicer_context.cancel()
+ def cancel(self):
+ self._servicer_context.cancel()
- def protocol_context(self):
- return _ServerProtocolContext(self._servicer_context)
+ def protocol_context(self):
+ return _ServerProtocolContext(self._servicer_context)
- def invocation_metadata(self):
- return _common.cygrpc_metadata(
- self._servicer_context.invocation_metadata())
+ def invocation_metadata(self):
+ return _common.cygrpc_metadata(
+ self._servicer_context.invocation_metadata())
- def initial_metadata(self, initial_metadata):
- self._servicer_context.send_initial_metadata(initial_metadata)
+ def initial_metadata(self, initial_metadata):
+ self._servicer_context.send_initial_metadata(initial_metadata)
- def terminal_metadata(self, terminal_metadata):
- self._servicer_context.set_terminal_metadata(terminal_metadata)
+ def terminal_metadata(self, terminal_metadata):
+ self._servicer_context.set_terminal_metadata(terminal_metadata)
- def code(self, code):
- self._servicer_context.set_code(code)
+ def code(self, code):
+ self._servicer_context.set_code(code)
- def details(self, details):
- self._servicer_context.set_details(details)
+ def details(self, details):
+ self._servicer_context.set_details(details)
def _adapt_unary_request_inline(unary_request_inline):
- def adaptation(request, servicer_context):
- return unary_request_inline(request, _FaceServicerContext(servicer_context))
- return adaptation
+
+ def adaptation(request, servicer_context):
+ return unary_request_inline(request,
+ _FaceServicerContext(servicer_context))
+
+ return adaptation
def _adapt_stream_request_inline(stream_request_inline):
- def adaptation(request_iterator, servicer_context):
- return stream_request_inline(
- request_iterator, _FaceServicerContext(servicer_context))
- return adaptation
+
+ def adaptation(request_iterator, servicer_context):
+ return stream_request_inline(request_iterator,
+ _FaceServicerContext(servicer_context))
+
+ return adaptation
class _Callback(stream.Consumer):
- def __init__(self):
- self._condition = threading.Condition()
- self._values = []
- self._terminated = False
- self._cancelled = False
-
- def consume(self, value):
- with self._condition:
- self._values.append(value)
- self._condition.notify_all()
-
- def terminate(self):
- with self._condition:
- self._terminated = True
- self._condition.notify_all()
-
- def consume_and_terminate(self, value):
- with self._condition:
- self._values.append(value)
- self._terminated = True
- self._condition.notify_all()
-
- def cancel(self):
- with self._condition:
- self._cancelled = True
- self._condition.notify_all()
-
- def draw_one_value(self):
- with self._condition:
- while True:
- if self._cancelled:
- raise abandonment.Abandoned()
- elif self._values:
- return self._values.pop(0)
- elif self._terminated:
- return None
- else:
- self._condition.wait()
-
- def draw_all_values(self):
- with self._condition:
- while True:
- if self._cancelled:
- raise abandonment.Abandoned()
- elif self._terminated:
- all_values = tuple(self._values)
- self._values = None
- return all_values
- else:
- self._condition.wait()
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._values = []
+ self._terminated = False
+ self._cancelled = False
+
+ def consume(self, value):
+ with self._condition:
+ self._values.append(value)
+ self._condition.notify_all()
+
+ def terminate(self):
+ with self._condition:
+ self._terminated = True
+ self._condition.notify_all()
+
+ def consume_and_terminate(self, value):
+ with self._condition:
+ self._values.append(value)
+ self._terminated = True
+ self._condition.notify_all()
+
+ def cancel(self):
+ with self._condition:
+ self._cancelled = True
+ self._condition.notify_all()
+
+ def draw_one_value(self):
+ with self._condition:
+ while True:
+ if self._cancelled:
+ raise abandonment.Abandoned()
+ elif self._values:
+ return self._values.pop(0)
+ elif self._terminated:
+ return None
+ else:
+ self._condition.wait()
+
+ def draw_all_values(self):
+ with self._condition:
+ while True:
+ if self._cancelled:
+ raise abandonment.Abandoned()
+ elif self._terminated:
+ all_values = tuple(self._values)
+ self._values = None
+ return all_values
+ else:
+ self._condition.wait()
def _run_request_pipe_thread(request_iterator, request_consumer,
servicer_context):
- thread_joined = threading.Event()
- def pipe_requests():
- for request in request_iterator:
- if not servicer_context.is_active() or thread_joined.is_set():
- return
- request_consumer.consume(request)
- if not servicer_context.is_active() or thread_joined.is_set():
- return
- request_consumer.terminate()
+ thread_joined = threading.Event()
+
+ def pipe_requests():
+ for request in request_iterator:
+ if not servicer_context.is_active() or thread_joined.is_set():
+ return
+ request_consumer.consume(request)
+ if not servicer_context.is_active() or thread_joined.is_set():
+ return
+ request_consumer.terminate()
- def stop_request_pipe(timeout):
- thread_joined.set()
+ def stop_request_pipe(timeout):
+ thread_joined.set()
- request_pipe_thread = _common.CleanupThread(
- stop_request_pipe, target=pipe_requests)
- request_pipe_thread.start()
+ request_pipe_thread = _common.CleanupThread(
+ stop_request_pipe, target=pipe_requests)
+ request_pipe_thread.start()
def _adapt_unary_unary_event(unary_unary_event):
- def adaptation(request, servicer_context):
- callback = _Callback()
- if not servicer_context.add_callback(callback.cancel):
- raise abandonment.Abandoned()
- unary_unary_event(
- request, callback.consume_and_terminate,
- _FaceServicerContext(servicer_context))
- return callback.draw_all_values()[0]
- return adaptation
+
+ def adaptation(request, servicer_context):
+ callback = _Callback()
+ if not servicer_context.add_callback(callback.cancel):
+ raise abandonment.Abandoned()
+ unary_unary_event(request, callback.consume_and_terminate,
+ _FaceServicerContext(servicer_context))
+ return callback.draw_all_values()[0]
+
+ return adaptation
def _adapt_unary_stream_event(unary_stream_event):
- def adaptation(request, servicer_context):
- callback = _Callback()
- if not servicer_context.add_callback(callback.cancel):
- raise abandonment.Abandoned()
- unary_stream_event(
- request, callback, _FaceServicerContext(servicer_context))
- while True:
- response = callback.draw_one_value()
- if response is None:
- return
- else:
- yield response
- return adaptation
+
+ def adaptation(request, servicer_context):
+ callback = _Callback()
+ if not servicer_context.add_callback(callback.cancel):
+ raise abandonment.Abandoned()
+ unary_stream_event(request, callback,
+ _FaceServicerContext(servicer_context))
+ while True:
+ response = callback.draw_one_value()
+ if response is None:
+ return
+ else:
+ yield response
+
+ return adaptation
def _adapt_stream_unary_event(stream_unary_event):
- def adaptation(request_iterator, servicer_context):
- callback = _Callback()
- if not servicer_context.add_callback(callback.cancel):
- raise abandonment.Abandoned()
- request_consumer = stream_unary_event(
- callback.consume_and_terminate, _FaceServicerContext(servicer_context))
- _run_request_pipe_thread(
- request_iterator, request_consumer, servicer_context)
- return callback.draw_all_values()[0]
- return adaptation
+
+ def adaptation(request_iterator, servicer_context):
+ callback = _Callback()
+ if not servicer_context.add_callback(callback.cancel):
+ raise abandonment.Abandoned()
+ request_consumer = stream_unary_event(
+ callback.consume_and_terminate,
+ _FaceServicerContext(servicer_context))
+ _run_request_pipe_thread(request_iterator, request_consumer,
+ servicer_context)
+ return callback.draw_all_values()[0]
+
+ return adaptation
def _adapt_stream_stream_event(stream_stream_event):
- def adaptation(request_iterator, servicer_context):
- callback = _Callback()
- if not servicer_context.add_callback(callback.cancel):
- raise abandonment.Abandoned()
- request_consumer = stream_stream_event(
- callback, _FaceServicerContext(servicer_context))
- _run_request_pipe_thread(
- request_iterator, request_consumer, servicer_context)
- while True:
- response = callback.draw_one_value()
- if response is None:
- return
- else:
- yield response
- return adaptation
+
+ def adaptation(request_iterator, servicer_context):
+ callback = _Callback()
+ if not servicer_context.add_callback(callback.cancel):
+ raise abandonment.Abandoned()
+ request_consumer = stream_stream_event(
+ callback, _FaceServicerContext(servicer_context))
+ _run_request_pipe_thread(request_iterator, request_consumer,
+ servicer_context)
+ while True:
+ response = callback.draw_one_value()
+ if response is None:
+ return
+ else:
+ yield response
+
+ return adaptation
class _SimpleMethodHandler(
- collections.namedtuple(
- '_MethodHandler',
- ('request_streaming', 'response_streaming', 'request_deserializer',
- 'response_serializer', 'unary_unary', 'unary_stream', 'stream_unary',
- 'stream_stream',)),
- grpc.RpcMethodHandler):
- pass
-
-
-def _simple_method_handler(
- implementation, request_deserializer, response_serializer):
- if implementation.style is style.Service.INLINE:
- if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
- return _SimpleMethodHandler(
- False, False, request_deserializer, response_serializer,
- _adapt_unary_request_inline(implementation.unary_unary_inline), None,
- None, None)
- elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
- return _SimpleMethodHandler(
- False, True, request_deserializer, response_serializer, None,
- _adapt_unary_request_inline(implementation.unary_stream_inline), None,
- None)
- elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
- return _SimpleMethodHandler(
- True, False, request_deserializer, response_serializer, None, None,
- _adapt_stream_request_inline(implementation.stream_unary_inline),
- None)
- elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
- return _SimpleMethodHandler(
- True, True, request_deserializer, response_serializer, None, None,
- None,
- _adapt_stream_request_inline(implementation.stream_stream_inline))
- elif implementation.style is style.Service.EVENT:
- if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
- return _SimpleMethodHandler(
- False, False, request_deserializer, response_serializer,
- _adapt_unary_unary_event(implementation.unary_unary_event), None,
- None, None)
- elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
- return _SimpleMethodHandler(
- False, True, request_deserializer, response_serializer, None,
- _adapt_unary_stream_event(implementation.unary_stream_event), None,
- None)
- elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
- return _SimpleMethodHandler(
- True, False, request_deserializer, response_serializer, None, None,
- _adapt_stream_unary_event(implementation.stream_unary_event), None)
- elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
- return _SimpleMethodHandler(
- True, True, request_deserializer, response_serializer, None, None,
- None, _adapt_stream_stream_event(implementation.stream_stream_event))
+ collections.namedtuple('_MethodHandler', (
+ 'request_streaming',
+ 'response_streaming',
+ 'request_deserializer',
+ 'response_serializer',
+ 'unary_unary',
+ 'unary_stream',
+ 'stream_unary',
+ 'stream_stream',)), grpc.RpcMethodHandler):
+ pass
+
+
+def _simple_method_handler(implementation, request_deserializer,
+ response_serializer):
+ if implementation.style is style.Service.INLINE:
+ if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
+ return _SimpleMethodHandler(
+ False, False, request_deserializer, response_serializer,
+ _adapt_unary_request_inline(implementation.unary_unary_inline),
+ None, None, None)
+ elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
+ return _SimpleMethodHandler(
+ False, True, request_deserializer, response_serializer, None,
+ _adapt_unary_request_inline(implementation.unary_stream_inline),
+ None, None)
+ elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
+ return _SimpleMethodHandler(True, False, request_deserializer,
+ response_serializer, None, None,
+ _adapt_stream_request_inline(
+ implementation.stream_unary_inline),
+ None)
+ elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
+ return _SimpleMethodHandler(
+ True, True, request_deserializer, response_serializer, None,
+ None, None,
+ _adapt_stream_request_inline(
+ implementation.stream_stream_inline))
+ elif implementation.style is style.Service.EVENT:
+ if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
+ return _SimpleMethodHandler(
+ False, False, request_deserializer, response_serializer,
+ _adapt_unary_unary_event(implementation.unary_unary_event),
+ None, None, None)
+ elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
+ return _SimpleMethodHandler(
+ False, True, request_deserializer, response_serializer, None,
+ _adapt_unary_stream_event(implementation.unary_stream_event),
+ None, None)
+ elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
+ return _SimpleMethodHandler(
+ True, False, request_deserializer, response_serializer, None,
+ None,
+ _adapt_stream_unary_event(implementation.stream_unary_event),
+ None)
+ elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
+ return _SimpleMethodHandler(
+ True, True, request_deserializer, response_serializer, None,
+ None, None,
+ _adapt_stream_stream_event(implementation.stream_stream_event))
def _flatten_method_pair_map(method_pair_map):
- method_pair_map = method_pair_map or {}
- flat_map = {}
- for method_pair in method_pair_map:
- method = _common.fully_qualified_method(method_pair[0], method_pair[1])
- flat_map[method] = method_pair_map[method_pair]
- return flat_map
+ method_pair_map = method_pair_map or {}
+ flat_map = {}
+ for method_pair in method_pair_map:
+ method = _common.fully_qualified_method(method_pair[0], method_pair[1])
+ flat_map[method] = method_pair_map[method_pair]
+ return flat_map
class _GenericRpcHandler(grpc.GenericRpcHandler):
- def __init__(
- self, method_implementations, multi_method_implementation,
- request_deserializers, response_serializers):
- self._method_implementations = _flatten_method_pair_map(
- method_implementations)
- self._request_deserializers = _flatten_method_pair_map(
- request_deserializers)
- self._response_serializers = _flatten_method_pair_map(
- response_serializers)
- self._multi_method_implementation = multi_method_implementation
-
- def service(self, handler_call_details):
- method_implementation = self._method_implementations.get(
- handler_call_details.method)
- if method_implementation is not None:
- return _simple_method_handler(
- method_implementation,
- self._request_deserializers.get(handler_call_details.method),
- self._response_serializers.get(handler_call_details.method))
- elif self._multi_method_implementation is None:
- return None
- else:
- try:
- return None #TODO(nathaniel): call the multimethod.
- except face.NoSuchMethodError:
- return None
+ def __init__(self, method_implementations, multi_method_implementation,
+ request_deserializers, response_serializers):
+ self._method_implementations = _flatten_method_pair_map(
+ method_implementations)
+ self._request_deserializers = _flatten_method_pair_map(
+ request_deserializers)
+ self._response_serializers = _flatten_method_pair_map(
+ response_serializers)
+ self._multi_method_implementation = multi_method_implementation
+
+ def service(self, handler_call_details):
+ method_implementation = self._method_implementations.get(
+ handler_call_details.method)
+ if method_implementation is not None:
+ return _simple_method_handler(
+ method_implementation,
+ self._request_deserializers.get(handler_call_details.method),
+ self._response_serializers.get(handler_call_details.method))
+ elif self._multi_method_implementation is None:
+ return None
+ else:
+ try:
+ return None #TODO(nathaniel): call the multimethod.
+ except face.NoSuchMethodError:
+ return None
class _Server(interfaces.Server):
- def __init__(self, server):
- self._server = server
+ def __init__(self, server):
+ self._server = server
- def add_insecure_port(self, address):
- return self._server.add_insecure_port(address)
+ def add_insecure_port(self, address):
+ return self._server.add_insecure_port(address)
- def add_secure_port(self, address, server_credentials):
- return self._server.add_secure_port(address, server_credentials)
+ def add_secure_port(self, address, server_credentials):
+ return self._server.add_secure_port(address, server_credentials)
- def start(self):
- self._server.start()
+ def start(self):
+ self._server.start()
- def stop(self, grace):
- return self._server.stop(grace)
+ def stop(self, grace):
+ return self._server.stop(grace)
- def __enter__(self):
- self._server.start()
- return self
+ def __enter__(self):
+ self._server.start()
+ return self
- def __exit__(self, exc_type, exc_val, exc_tb):
- self._server.stop(None)
- return False
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._server.stop(None)
+ return False
-def server(
- service_implementations, multi_method_implementation, request_deserializers,
- response_serializers, thread_pool, thread_pool_size):
- generic_rpc_handler = _GenericRpcHandler(
- service_implementations, multi_method_implementation,
- request_deserializers, response_serializers)
- if thread_pool is None:
- effective_thread_pool = logging_pool.pool(
- _DEFAULT_POOL_SIZE if thread_pool_size is None else thread_pool_size)
- else:
- effective_thread_pool = thread_pool
- return _Server(
- grpc.server(effective_thread_pool, handlers=(generic_rpc_handler,)))
+def server(service_implementations, multi_method_implementation,
+ request_deserializers, response_serializers, thread_pool,
+ thread_pool_size):
+ generic_rpc_handler = _GenericRpcHandler(
+ service_implementations, multi_method_implementation,
+ request_deserializers, response_serializers)
+ if thread_pool is None:
+ effective_thread_pool = logging_pool.pool(_DEFAULT_POOL_SIZE
+ if thread_pool_size is None
+ else thread_pool_size)
+ else:
+ effective_thread_pool = thread_pool
+ return _Server(
+ grpc.server(
+ effective_thread_pool, handlers=(generic_rpc_handler,)))
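
To place the server adaptations above in context, a minimal sketch of building a beta server around an INLINE-style unary-unary implementation (the path that flows through _adapt_unary_request_inline and _simple_method_handler). The face_utilities helper path, the handler and the service/method names are assumptions of this sketch, recalled from beta-era generated stubs rather than taken from this change.

from grpc.beta import implementations
from grpc.framework.interfaces.face import utilities as face_utilities

def unary_echo(request, servicer_context):
    # INLINE-style handler; wrapped by _adapt_unary_request_inline above.
    return request

method_implementations = {
    ('example.Echo', 'UnaryEcho'): face_utilities.unary_unary_inline(unary_echo),
}
server = implementations.server(method_implementations)
port = server.add_insecure_port('[::]:50051')  # placeholder port
server.start()
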
diff --git a/src/python/grpcio/grpc/beta/implementations.py b/src/python/grpcio/grpc/beta/implementations.py
index ab25fd5eec..7093852278 100644
--- a/src/python/grpcio/grpc/beta/implementations.py
+++ b/src/python/grpcio/grpc/beta/implementations.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Entry points into the Beta API of gRPC Python."""
# threading is referenced from specification in this module.
@@ -43,7 +42,6 @@ from grpc.beta import interfaces
from grpc.framework.common import cardinality # pylint: disable=unused-import
from grpc.framework.interfaces.face import face # pylint: disable=unused-import
-
ChannelCredentials = grpc.ChannelCredentials
ssl_channel_credentials = grpc.ssl_channel_credentials
CallCredentials = grpc.CallCredentials
@@ -51,7 +49,7 @@ metadata_call_credentials = grpc.metadata_call_credentials
def google_call_credentials(credentials):
- """Construct CallCredentials from GoogleCredentials.
+ """Construct CallCredentials from GoogleCredentials.
Args:
credentials: A GoogleCredentials object from the oauth2client library.
@@ -59,7 +57,8 @@ def google_call_credentials(credentials):
Returns:
A CallCredentials object for use in a GRPCCallOptions object.
"""
- return metadata_call_credentials(_auth.GoogleCallCredentials(credentials))
+ return metadata_call_credentials(_auth.GoogleCallCredentials(credentials))
+
access_token_call_credentials = grpc.access_token_call_credentials
composite_call_credentials = grpc.composite_call_credentials
@@ -67,18 +66,18 @@ composite_channel_credentials = grpc.composite_channel_credentials
class Channel(object):
- """A channel to a remote host through which RPCs may be conducted.
+ """A channel to a remote host through which RPCs may be conducted.
Only the "subscribe" and "unsubscribe" methods are supported for application
use. This class' instance constructor and all other attributes are
unsupported.
"""
- def __init__(self, channel):
- self._channel = channel
+ def __init__(self, channel):
+ self._channel = channel
- def subscribe(self, callback, try_to_connect=None):
- """Subscribes to this Channel's connectivity.
+ def subscribe(self, callback, try_to_connect=None):
+ """Subscribes to this Channel's connectivity.
Args:
callback: A callable to be invoked and passed an
@@ -90,20 +89,20 @@ class Channel(object):
attempt to connect if it is not already connected and ready to conduct
RPCs.
"""
- self._channel.subscribe(callback, try_to_connect=try_to_connect)
+ self._channel.subscribe(callback, try_to_connect=try_to_connect)
- def unsubscribe(self, callback):
- """Unsubscribes a callback from this Channel's connectivity.
+ def unsubscribe(self, callback):
+ """Unsubscribes a callback from this Channel's connectivity.
Args:
callback: A callable previously registered with this Channel from having
been passed to its "subscribe" method.
"""
- self._channel.unsubscribe(callback)
+ self._channel.unsubscribe(callback)
def insecure_channel(host, port):
- """Creates an insecure Channel to a remote host.
+ """Creates an insecure Channel to a remote host.
Args:
host: The name of the remote host to which to connect.
@@ -113,13 +112,13 @@ def insecure_channel(host, port):
Returns:
A Channel to the remote host through which RPCs may be conducted.
"""
- channel = grpc.insecure_channel(
- host if port is None else '%s:%d' % (host, port))
- return Channel(channel)
+ channel = grpc.insecure_channel(host
+ if port is None else '%s:%d' % (host, port))
+ return Channel(channel)
def secure_channel(host, port, channel_credentials):
- """Creates a secure Channel to a remote host.
+ """Creates a secure Channel to a remote host.
Args:
host: The name of the remote host to which to connect.
@@ -130,37 +129,39 @@ def secure_channel(host, port, channel_credentials):
Returns:
A secure Channel to the remote host through which RPCs may be conducted.
"""
- channel = grpc.secure_channel(
- host if port is None else '%s:%d' % (host, port), channel_credentials)
- return Channel(channel)
+ channel = grpc.secure_channel(host if port is None else
+ '%s:%d' % (host, port), channel_credentials)
+ return Channel(channel)
class StubOptions(object):
- """A value encapsulating the various options for creation of a Stub.
+ """A value encapsulating the various options for creation of a Stub.
This class and its instances have no supported interface - it exists to define
the type of its instances and its instances exist to be passed to other
functions.
"""
- def __init__(
- self, host, request_serializers, response_deserializers,
- metadata_transformer, thread_pool, thread_pool_size):
- self.host = host
- self.request_serializers = request_serializers
- self.response_deserializers = response_deserializers
- self.metadata_transformer = metadata_transformer
- self.thread_pool = thread_pool
- self.thread_pool_size = thread_pool_size
+ def __init__(self, host, request_serializers, response_deserializers,
+ metadata_transformer, thread_pool, thread_pool_size):
+ self.host = host
+ self.request_serializers = request_serializers
+ self.response_deserializers = response_deserializers
+ self.metadata_transformer = metadata_transformer
+ self.thread_pool = thread_pool
+ self.thread_pool_size = thread_pool_size
-_EMPTY_STUB_OPTIONS = StubOptions(
- None, None, None, None, None, None)
+_EMPTY_STUB_OPTIONS = StubOptions(None, None, None, None, None, None)
-def stub_options(
- host=None, request_serializers=None, response_deserializers=None,
- metadata_transformer=None, thread_pool=None, thread_pool_size=None):
- """Creates a StubOptions value to be passed at stub creation.
+
+def stub_options(host=None,
+ request_serializers=None,
+ response_deserializers=None,
+ metadata_transformer=None,
+ thread_pool=None,
+ thread_pool_size=None):
+ """Creates a StubOptions value to be passed at stub creation.
All parameters are optional and should always be passed by keyword.
@@ -180,13 +181,12 @@ def stub_options(
Returns:
A StubOptions value created from the passed parameters.
"""
- return StubOptions(
- host, request_serializers, response_deserializers,
- metadata_transformer, thread_pool, thread_pool_size)
+ return StubOptions(host, request_serializers, response_deserializers,
+ metadata_transformer, thread_pool, thread_pool_size)
def generic_stub(channel, options=None):
- """Creates a face.GenericStub on which RPCs can be made.
+ """Creates a face.GenericStub on which RPCs can be made.
Args:
channel: A Channel for use by the created stub.
@@ -195,16 +195,17 @@ def generic_stub(channel, options=None):
Returns:
A face.GenericStub on which RPCs can be made.
"""
- effective_options = _EMPTY_STUB_OPTIONS if options is None else options
- return _client_adaptations.generic_stub(
- channel._channel, # pylint: disable=protected-access
- effective_options.host, effective_options.metadata_transformer,
- effective_options.request_serializers,
- effective_options.response_deserializers)
+ effective_options = _EMPTY_STUB_OPTIONS if options is None else options
+ return _client_adaptations.generic_stub(
+ channel._channel, # pylint: disable=protected-access
+ effective_options.host,
+ effective_options.metadata_transformer,
+ effective_options.request_serializers,
+ effective_options.response_deserializers)
def dynamic_stub(channel, service, cardinalities, options=None):
- """Creates a face.DynamicStub with which RPCs can be invoked.
+ """Creates a face.DynamicStub with which RPCs can be invoked.
Args:
channel: A Channel for the returned face.DynamicStub to use.
@@ -217,13 +218,15 @@ def dynamic_stub(channel, service, cardinalities, options=None):
Returns:
A face.DynamicStub with which RPCs can be invoked.
"""
- effective_options = StubOptions() if options is None else options
- return _client_adaptations.dynamic_stub(
- channel._channel, # pylint: disable=protected-access
- service, cardinalities, effective_options.host,
- effective_options.metadata_transformer,
- effective_options.request_serializers,
- effective_options.response_deserializers)
+ effective_options = StubOptions() if options is None else options
+ return _client_adaptations.dynamic_stub(
+ channel._channel, # pylint: disable=protected-access
+ service,
+ cardinalities,
+ effective_options.host,
+ effective_options.metadata_transformer,
+ effective_options.request_serializers,
+ effective_options.response_deserializers)
ServerCredentials = grpc.ServerCredentials
@@ -231,34 +234,36 @@ ssl_server_credentials = grpc.ssl_server_credentials
class ServerOptions(object):
- """A value encapsulating the various options for creation of a Server.
+ """A value encapsulating the various options for creation of a Server.
This class and its instances have no supported interface - it exists to define
the type of its instances and its instances exist to be passed to other
functions.
"""
- def __init__(
- self, multi_method_implementation, request_deserializers,
- response_serializers, thread_pool, thread_pool_size, default_timeout,
- maximum_timeout):
- self.multi_method_implementation = multi_method_implementation
- self.request_deserializers = request_deserializers
- self.response_serializers = response_serializers
- self.thread_pool = thread_pool
- self.thread_pool_size = thread_pool_size
- self.default_timeout = default_timeout
- self.maximum_timeout = maximum_timeout
+ def __init__(self, multi_method_implementation, request_deserializers,
+ response_serializers, thread_pool, thread_pool_size,
+ default_timeout, maximum_timeout):
+ self.multi_method_implementation = multi_method_implementation
+ self.request_deserializers = request_deserializers
+ self.response_serializers = response_serializers
+ self.thread_pool = thread_pool
+ self.thread_pool_size = thread_pool_size
+ self.default_timeout = default_timeout
+ self.maximum_timeout = maximum_timeout
+
-_EMPTY_SERVER_OPTIONS = ServerOptions(
- None, None, None, None, None, None, None)
+_EMPTY_SERVER_OPTIONS = ServerOptions(None, None, None, None, None, None, None)
-def server_options(
- multi_method_implementation=None, request_deserializers=None,
- response_serializers=None, thread_pool=None, thread_pool_size=None,
- default_timeout=None, maximum_timeout=None):
- """Creates a ServerOptions value to be passed at server creation.
+def server_options(multi_method_implementation=None,
+ request_deserializers=None,
+ response_serializers=None,
+ thread_pool=None,
+ thread_pool_size=None,
+ default_timeout=None,
+ maximum_timeout=None):
+ """Creates a ServerOptions value to be passed at server creation.
All parameters are optional and should always be passed by keyword.
@@ -282,13 +287,13 @@ def server_options(
Returns:
A StubOptions value created from the passed parameters.
"""
- return ServerOptions(
- multi_method_implementation, request_deserializers, response_serializers,
- thread_pool, thread_pool_size, default_timeout, maximum_timeout)
+ return ServerOptions(multi_method_implementation, request_deserializers,
+ response_serializers, thread_pool, thread_pool_size,
+ default_timeout, maximum_timeout)
def server(service_implementations, options=None):
- """Creates an interfaces.Server with which RPCs can be serviced.
+ """Creates an interfaces.Server with which RPCs can be serviced.
Args:
service_implementations: A dictionary from service name-method name pair to
@@ -299,9 +304,9 @@ def server(service_implementations, options=None):
Returns:
An interfaces.Server with which RPCs can be serviced.
"""
- effective_options = _EMPTY_SERVER_OPTIONS if options is None else options
- return _server_adaptations.server(
- service_implementations, effective_options.multi_method_implementation,
- effective_options.request_deserializers,
- effective_options.response_serializers, effective_options.thread_pool,
- effective_options.thread_pool_size)
+ effective_options = _EMPTY_SERVER_OPTIONS if options is None else options
+ return _server_adaptations.server(
+ service_implementations, effective_options.multi_method_implementation,
+ effective_options.request_deserializers,
+ effective_options.response_serializers, effective_options.thread_pool,
+ effective_options.thread_pool_size)
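For orientation, a minimal sketch of driving the reformatted beta-server helpers above; the empty service_implementations mapping is a placeholder for generated beta bindings, and the port and timeout values are arbitrary.

from grpc.beta import implementations

service_implementations = {}  # placeholder: maps service/method names to implementations
options = implementations.server_options(thread_pool_size=10, default_timeout=30)
beta_server = implementations.server(service_implementations, options=options)
port = beta_server.add_insecure_port('[::]:0')  # 0 lets the runtime pick a free port
beta_server.start()
beta_server.stop(0)  # grace period of zero seconds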
diff --git a/src/python/grpcio/grpc/beta/interfaces.py b/src/python/grpcio/grpc/beta/interfaces.py
index 90f6bbbfcc..361d1bcffe 100644
--- a/src/python/grpcio/grpc/beta/interfaces.py
+++ b/src/python/grpcio/grpc/beta/interfaces.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Constants and interfaces of the Beta API of gRPC Python."""
import abc
@@ -43,21 +42,21 @@ StatusCode = grpc.StatusCode
class GRPCCallOptions(object):
- """A value encapsulating gRPC-specific options passed on RPC invocation.
+ """A value encapsulating gRPC-specific options passed on RPC invocation.
This class and its instances have no supported interface - it exists to
define the type of its instances and its instances exist to be passed to
other functions.
"""
- def __init__(self, disable_compression, subcall_of, credentials):
- self.disable_compression = disable_compression
- self.subcall_of = subcall_of
- self.credentials = credentials
+ def __init__(self, disable_compression, subcall_of, credentials):
+ self.disable_compression = disable_compression
+ self.subcall_of = subcall_of
+ self.credentials = credentials
def grpc_call_options(disable_compression=False, credentials=None):
- """Creates a GRPCCallOptions value to be passed at RPC invocation.
+ """Creates a GRPCCallOptions value to be passed at RPC invocation.
All parameters are optional and should always be passed by keyword.
@@ -67,7 +66,8 @@ def grpc_call_options(disable_compression=False, credentials=None):
request-unary RPCs.
credentials: A CallCredentials object to use for the invoked RPC.
"""
- return GRPCCallOptions(disable_compression, None, credentials)
+ return GRPCCallOptions(disable_compression, None, credentials)
+
GRPCAuthMetadataContext = grpc.AuthMetadataContext
GRPCAuthMetadataPluginCallback = grpc.AuthMetadataPluginCallback
@@ -75,38 +75,38 @@ GRPCAuthMetadataPlugin = grpc.AuthMetadataPlugin
class GRPCServicerContext(six.with_metaclass(abc.ABCMeta)):
- """Exposes gRPC-specific options and behaviors to code servicing RPCs."""
+ """Exposes gRPC-specific options and behaviors to code servicing RPCs."""
- @abc.abstractmethod
- def peer(self):
- """Identifies the peer that invoked the RPC being serviced.
+ @abc.abstractmethod
+ def peer(self):
+ """Identifies the peer that invoked the RPC being serviced.
Returns:
A string identifying the peer that invoked the RPC being serviced.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def disable_next_response_compression(self):
- """Disables compression of the next response passed by the application."""
- raise NotImplementedError()
+ @abc.abstractmethod
+ def disable_next_response_compression(self):
+ """Disables compression of the next response passed by the application."""
+ raise NotImplementedError()
class GRPCInvocationContext(six.with_metaclass(abc.ABCMeta)):
- """Exposes gRPC-specific options and behaviors to code invoking RPCs."""
+ """Exposes gRPC-specific options and behaviors to code invoking RPCs."""
- @abc.abstractmethod
- def disable_next_request_compression(self):
- """Disables compression of the next request passed by the application."""
- raise NotImplementedError()
+ @abc.abstractmethod
+ def disable_next_request_compression(self):
+ """Disables compression of the next request passed by the application."""
+ raise NotImplementedError()
class Server(six.with_metaclass(abc.ABCMeta)):
- """Services RPCs."""
+ """Services RPCs."""
- @abc.abstractmethod
- def add_insecure_port(self, address):
- """Reserves a port for insecure RPC service once this Server becomes active.
+ @abc.abstractmethod
+ def add_insecure_port(self, address):
+ """Reserves a port for insecure RPC service once this Server becomes active.
This method may only be called before this Server's start method is called.
@@ -120,11 +120,11 @@ class Server(six.with_metaclass(abc.ABCMeta)):
in the passed address, but will likely be different if the port number
contained in the passed address was zero.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def add_secure_port(self, address, server_credentials):
- """Reserves a port for secure RPC service after this Server becomes active.
+ @abc.abstractmethod
+ def add_secure_port(self, address, server_credentials):
+ """Reserves a port for secure RPC service after this Server becomes active.
This method may only be called before this Server's start method is called.
@@ -139,20 +139,20 @@ class Server(six.with_metaclass(abc.ABCMeta)):
in the passed address, but will likely be different if the port number
contained in the passed address was zero.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def start(self):
- """Starts this Server's service of RPCs.
+ @abc.abstractmethod
+ def start(self):
+ """Starts this Server's service of RPCs.
This method may only be called while the server is not serving RPCs (i.e. it
is not idempotent).
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def stop(self, grace):
- """Stops this Server's service of RPCs.
+ @abc.abstractmethod
+ def stop(self, grace):
+ """Stops this Server's service of RPCs.
All calls to this method immediately stop service of new RPCs. When existing
RPCs are aborted is controlled by the grace period parameter passed to this
@@ -177,4 +177,4 @@ class Server(six.with_metaclass(abc.ABCMeta)):
at the time it was stopped or if all RPCs that it had underway completed
very early in the grace period).
"""
- raise NotImplementedError()
+ raise NotImplementedError()
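A small illustrative sketch of the call-options helper above; the stub and method name in the comment are assumptions, not part of this file.

from grpc.beta import interfaces

call_options = interfaces.grpc_call_options(disable_compression=True)
# A beta stub method would typically accept this via its protocol_options
# parameter, e.g. stub.SomeUnaryMethod(request, timeout, protocol_options=call_options).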
diff --git a/src/python/grpcio/grpc/beta/utilities.py b/src/python/grpcio/grpc/beta/utilities.py
index fb07a76579..60525350a7 100644
--- a/src/python/grpcio/grpc/beta/utilities.py
+++ b/src/python/grpcio/grpc/beta/utilities.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Utilities for the gRPC Python Beta API."""
import threading
@@ -44,107 +43,107 @@ _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = (
class _ChannelReadyFuture(future.Future):
- def __init__(self, channel):
- self._condition = threading.Condition()
- self._channel = channel
-
- self._matured = False
- self._cancelled = False
- self._done_callbacks = []
-
- def _block(self, timeout):
- until = None if timeout is None else time.time() + timeout
- with self._condition:
- while True:
- if self._cancelled:
- raise future.CancelledError()
- elif self._matured:
- return
- else:
- if until is None:
- self._condition.wait()
- else:
- remaining = until - time.time()
- if remaining < 0:
- raise future.TimeoutError()
+ def __init__(self, channel):
+ self._condition = threading.Condition()
+ self._channel = channel
+
+ self._matured = False
+ self._cancelled = False
+ self._done_callbacks = []
+
+ def _block(self, timeout):
+ until = None if timeout is None else time.time() + timeout
+ with self._condition:
+ while True:
+ if self._cancelled:
+ raise future.CancelledError()
+ elif self._matured:
+ return
+ else:
+ if until is None:
+ self._condition.wait()
+ else:
+ remaining = until - time.time()
+ if remaining < 0:
+ raise future.TimeoutError()
+ else:
+ self._condition.wait(timeout=remaining)
+
+ def _update(self, connectivity):
+ with self._condition:
+ if (not self._cancelled and
+ connectivity is interfaces.ChannelConnectivity.READY):
+ self._matured = True
+ self._channel.unsubscribe(self._update)
+ self._condition.notify_all()
+ done_callbacks = tuple(self._done_callbacks)
+ self._done_callbacks = None
+ else:
+ return
+
+ for done_callback in done_callbacks:
+ callable_util.call_logging_exceptions(
+ done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)
+
+ def cancel(self):
+ with self._condition:
+ if not self._matured:
+ self._cancelled = True
+ self._channel.unsubscribe(self._update)
+ self._condition.notify_all()
+ done_callbacks = tuple(self._done_callbacks)
+ self._done_callbacks = None
else:
- self._condition.wait(timeout=remaining)
-
- def _update(self, connectivity):
- with self._condition:
- if (not self._cancelled and
- connectivity is interfaces.ChannelConnectivity.READY):
- self._matured = True
- self._channel.unsubscribe(self._update)
- self._condition.notify_all()
- done_callbacks = tuple(self._done_callbacks)
- self._done_callbacks = None
- else:
- return
-
- for done_callback in done_callbacks:
- callable_util.call_logging_exceptions(
- done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)
-
- def cancel(self):
- with self._condition:
- if not self._matured:
- self._cancelled = True
- self._channel.unsubscribe(self._update)
- self._condition.notify_all()
- done_callbacks = tuple(self._done_callbacks)
- self._done_callbacks = None
- else:
- return False
-
- for done_callback in done_callbacks:
- callable_util.call_logging_exceptions(
- done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)
-
- def cancelled(self):
- with self._condition:
- return self._cancelled
-
- def running(self):
- with self._condition:
- return not self._cancelled and not self._matured
-
- def done(self):
- with self._condition:
- return self._cancelled or self._matured
-
- def result(self, timeout=None):
- self._block(timeout)
- return None
-
- def exception(self, timeout=None):
- self._block(timeout)
- return None
-
- def traceback(self, timeout=None):
- self._block(timeout)
- return None
-
- def add_done_callback(self, fn):
- with self._condition:
- if not self._cancelled and not self._matured:
- self._done_callbacks.append(fn)
- return
-
- fn(self)
-
- def start(self):
- with self._condition:
- self._channel.subscribe(self._update, try_to_connect=True)
-
- def __del__(self):
- with self._condition:
- if not self._cancelled and not self._matured:
- self._channel.unsubscribe(self._update)
+ return False
+
+ for done_callback in done_callbacks:
+ callable_util.call_logging_exceptions(
+ done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)
+
+ def cancelled(self):
+ with self._condition:
+ return self._cancelled
+
+ def running(self):
+ with self._condition:
+ return not self._cancelled and not self._matured
+
+ def done(self):
+ with self._condition:
+ return self._cancelled or self._matured
+
+ def result(self, timeout=None):
+ self._block(timeout)
+ return None
+
+ def exception(self, timeout=None):
+ self._block(timeout)
+ return None
+
+ def traceback(self, timeout=None):
+ self._block(timeout)
+ return None
+
+ def add_done_callback(self, fn):
+ with self._condition:
+ if not self._cancelled and not self._matured:
+ self._done_callbacks.append(fn)
+ return
+
+ fn(self)
+
+ def start(self):
+ with self._condition:
+ self._channel.subscribe(self._update, try_to_connect=True)
+
+ def __del__(self):
+ with self._condition:
+ if not self._cancelled and not self._matured:
+ self._channel.unsubscribe(self._update)
def channel_ready_future(channel):
- """Creates a future.Future tracking when an implementations.Channel is ready.
+ """Creates a future.Future tracking when an implementations.Channel is ready.
Cancelling the returned future.Future does not tell the given
implementations.Channel to abandon attempts it may have been making to
@@ -158,7 +157,6 @@ def channel_ready_future(channel):
A future.Future that matures when the given Channel has connectivity
interfaces.ChannelConnectivity.READY.
"""
- ready_future = _ChannelReadyFuture(channel)
- ready_future.start()
- return ready_future
-
+ ready_future = _ChannelReadyFuture(channel)
+ ready_future.start()
+ return ready_future
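A minimal sketch of the ready-future helper above, assuming a reachable insecure server on localhost:50051.

from grpc.beta import implementations
from grpc.beta import utilities

channel = implementations.insecure_channel('localhost', 50051)
ready_future = utilities.channel_ready_future(channel)
ready_future.result(timeout=5)  # blocks until the channel is READY or raises a timeout error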
diff --git a/src/python/grpcio/grpc/framework/__init__.py b/src/python/grpcio/grpc/framework/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio/grpc/framework/__init__.py
+++ b/src/python/grpcio/grpc/framework/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio/grpc/framework/common/__init__.py b/src/python/grpcio/grpc/framework/common/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio/grpc/framework/common/__init__.py
+++ b/src/python/grpcio/grpc/framework/common/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio/grpc/framework/common/cardinality.py b/src/python/grpcio/grpc/framework/common/cardinality.py
index 610425e803..d8927cf9b0 100644
--- a/src/python/grpcio/grpc/framework/common/cardinality.py
+++ b/src/python/grpcio/grpc/framework/common/cardinality.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Defines an enum for classifying RPC methods by streaming semantics."""
import enum
@@ -34,9 +33,9 @@ import enum
@enum.unique
class Cardinality(enum.Enum):
- """Describes the streaming semantics of an RPC method."""
+ """Describes the streaming semantics of an RPC method."""
- UNARY_UNARY = 'request-unary/response-unary'
- UNARY_STREAM = 'request-unary/response-streaming'
- STREAM_UNARY = 'request-streaming/response-unary'
- STREAM_STREAM = 'request-streaming/response-streaming'
+ UNARY_UNARY = 'request-unary/response-unary'
+ UNARY_STREAM = 'request-unary/response-streaming'
+ STREAM_UNARY = 'request-streaming/response-unary'
+ STREAM_STREAM = 'request-streaming/response-streaming'
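For context, this enum is what the cardinalities mapping passed to dynamic_stub is built from; the method names below are hypothetical.

from grpc.framework.common import cardinality

cardinalities = {
    'Check': cardinality.Cardinality.UNARY_UNARY,
    'Watch': cardinality.Cardinality.UNARY_STREAM,
}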
diff --git a/src/python/grpcio/grpc/framework/common/style.py b/src/python/grpcio/grpc/framework/common/style.py
index 6ae694bdcb..43f4211145 100644
--- a/src/python/grpcio/grpc/framework/common/style.py
+++ b/src/python/grpcio/grpc/framework/common/style.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Defines an enum for classifying RPC methods by control flow semantics."""
import enum
@@ -34,7 +33,7 @@ import enum
@enum.unique
class Service(enum.Enum):
- """Describes the control flow style of RPC method implementation."""
+ """Describes the control flow style of RPC method implementation."""
- INLINE = 'inline'
- EVENT = 'event'
+ INLINE = 'inline'
+ EVENT = 'event'
diff --git a/src/python/grpcio/grpc/framework/foundation/__init__.py b/src/python/grpcio/grpc/framework/foundation/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio/grpc/framework/foundation/__init__.py
+++ b/src/python/grpcio/grpc/framework/foundation/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio/grpc/framework/foundation/abandonment.py b/src/python/grpcio/grpc/framework/foundation/abandonment.py
index 960b4d06b4..32385b9657 100644
--- a/src/python/grpcio/grpc/framework/foundation/abandonment.py
+++ b/src/python/grpcio/grpc/framework/foundation/abandonment.py
@@ -26,12 +26,11 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Utilities for indicating abandonment of computation."""
class Abandoned(Exception):
- """Indicates that some computation is being abandoned.
+ """Indicates that some computation is being abandoned.
Abandoning a computation is different than returning a value or raising
an exception indicating some operational or programming defect.
diff --git a/src/python/grpcio/grpc/framework/foundation/callable_util.py b/src/python/grpcio/grpc/framework/foundation/callable_util.py
index 4f029f97bb..3b8351c19b 100644
--- a/src/python/grpcio/grpc/framework/foundation/callable_util.py
+++ b/src/python/grpcio/grpc/framework/foundation/callable_util.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Utilities for working with callables."""
import abc
@@ -39,7 +38,7 @@ import six
class Outcome(six.with_metaclass(abc.ABCMeta)):
- """A sum type describing the outcome of some call.
+ """A sum type describing the outcome of some call.
Attributes:
kind: One of Kind.RETURNED or Kind.RAISED respectively indicating that the
@@ -50,31 +49,31 @@ class Outcome(six.with_metaclass(abc.ABCMeta)):
Kind.RAISED.
"""
- @enum.unique
- class Kind(enum.Enum):
- """Identifies the general kind of the outcome of some call."""
+ @enum.unique
+ class Kind(enum.Enum):
+ """Identifies the general kind of the outcome of some call."""
- RETURNED = object()
- RAISED = object()
+ RETURNED = object()
+ RAISED = object()
class _EasyOutcome(
- collections.namedtuple(
- '_EasyOutcome', ['kind', 'return_value', 'exception']),
- Outcome):
- """A trivial implementation of Outcome."""
+ collections.namedtuple('_EasyOutcome',
+ ['kind', 'return_value', 'exception']), Outcome):
+ """A trivial implementation of Outcome."""
def _call_logging_exceptions(behavior, message, *args, **kwargs):
- try:
- return _EasyOutcome(Outcome.Kind.RETURNED, behavior(*args, **kwargs), None)
- except Exception as e: # pylint: disable=broad-except
- logging.exception(message)
- return _EasyOutcome(Outcome.Kind.RAISED, None, e)
+ try:
+ return _EasyOutcome(Outcome.Kind.RETURNED,
+ behavior(*args, **kwargs), None)
+ except Exception as e: # pylint: disable=broad-except
+ logging.exception(message)
+ return _EasyOutcome(Outcome.Kind.RAISED, None, e)
def with_exceptions_logged(behavior, message):
- """Wraps a callable in a try-except that logs any exceptions it raises.
+ """Wraps a callable in a try-except that logs any exceptions it raises.
Args:
behavior: Any callable.
@@ -86,14 +85,16 @@ def with_exceptions_logged(behavior, message):
future.Outcome describing whether the given behavior returned a value or
raised an exception.
"""
- @functools.wraps(behavior)
- def wrapped_behavior(*args, **kwargs):
- return _call_logging_exceptions(behavior, message, *args, **kwargs)
- return wrapped_behavior
+
+ @functools.wraps(behavior)
+ def wrapped_behavior(*args, **kwargs):
+ return _call_logging_exceptions(behavior, message, *args, **kwargs)
+
+ return wrapped_behavior
def call_logging_exceptions(behavior, message, *args, **kwargs):
- """Calls a behavior in a try-except that logs any exceptions it raises.
+ """Calls a behavior in a try-except that logs any exceptions it raises.
Args:
behavior: Any callable.
@@ -105,4 +106,4 @@ def call_logging_exceptions(behavior, message, *args, **kwargs):
An Outcome describing whether the given behavior returned a value or raised
an exception.
"""
- return _call_logging_exceptions(behavior, message, *args, **kwargs)
+ return _call_logging_exceptions(behavior, message, *args, **kwargs)
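A short sketch of the two helpers above.

from grpc.framework.foundation import callable_util

def divide(numerator, denominator):
    return numerator / denominator

outcome = callable_util.call_logging_exceptions(divide, 'divide failed', 1, 0)
if outcome.kind is callable_util.Outcome.Kind.RAISED:
    print(outcome.exception)  # ZeroDivisionError; the traceback was already logged

safe_divide = callable_util.with_exceptions_logged(divide, 'divide failed')
print(safe_divide(6, 3).return_value)  # 2 (2.0 on Python 3)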
diff --git a/src/python/grpcio/grpc/framework/foundation/future.py b/src/python/grpcio/grpc/framework/foundation/future.py
index 6fb58eadb6..e2ecf62921 100644
--- a/src/python/grpcio/grpc/framework/foundation/future.py
+++ b/src/python/grpcio/grpc/framework/foundation/future.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""A Future interface.
Python doesn't have a Future interface in its standard library. In the absence
@@ -53,33 +52,33 @@ import six
class TimeoutError(Exception):
- """Indicates that a particular call timed out."""
+ """Indicates that a particular call timed out."""
class CancelledError(Exception):
- """Indicates that the computation underlying a Future was cancelled."""
+ """Indicates that the computation underlying a Future was cancelled."""
class Future(six.with_metaclass(abc.ABCMeta)):
- """A representation of a computation in another control flow.
+ """A representation of a computation in another control flow.
Computations represented by a Future may be yet to be begun, may be ongoing,
or may have already completed.
"""
- # NOTE(nathaniel): This isn't the return type that I would want to have if it
- # were up to me. Were this interface being written from scratch, the return
- # type of this method would probably be a sum type like:
- #
- # NOT_COMMENCED
- # COMMENCED_AND_NOT_COMPLETED
- # PARTIAL_RESULT<Partial_Result_Type>
- # COMPLETED<Result_Type>
- # UNCANCELLABLE
- # NOT_IMMEDIATELY_DETERMINABLE
- @abc.abstractmethod
- def cancel(self):
- """Attempts to cancel the computation.
+ # NOTE(nathaniel): This isn't the return type that I would want to have if it
+ # were up to me. Were this interface being written from scratch, the return
+ # type of this method would probably be a sum type like:
+ #
+ # NOT_COMMENCED
+ # COMMENCED_AND_NOT_COMPLETED
+ # PARTIAL_RESULT<Partial_Result_Type>
+ # COMPLETED<Result_Type>
+ # UNCANCELLABLE
+ # NOT_IMMEDIATELY_DETERMINABLE
+ @abc.abstractmethod
+ def cancel(self):
+ """Attempts to cancel the computation.
This method does not block.
@@ -92,25 +91,25 @@ class Future(six.with_metaclass(abc.ABCMeta)):
remote system for which a determination of whether or not it commenced
before being cancelled cannot be made without blocking.
"""
- raise NotImplementedError()
-
- # NOTE(nathaniel): Here too this isn't the return type that I'd want this
- # method to have if it were up to me. I think I'd go with another sum type
- # like:
- #
- # NOT_CANCELLED (this object's cancel method hasn't been called)
- # NOT_COMMENCED
- # COMMENCED_AND_NOT_COMPLETED
- # PARTIAL_RESULT<Partial_Result_Type>
- # COMPLETED<Result_Type>
- # UNCANCELLABLE
- # NOT_IMMEDIATELY_DETERMINABLE
- #
- # Notice how giving the cancel method the right semantics obviates most
- # reasons for this method to exist.
- @abc.abstractmethod
- def cancelled(self):
- """Describes whether the computation was cancelled.
+ raise NotImplementedError()
+
+ # NOTE(nathaniel): Here too this isn't the return type that I'd want this
+ # method to have if it were up to me. I think I'd go with another sum type
+ # like:
+ #
+ # NOT_CANCELLED (this object's cancel method hasn't been called)
+ # NOT_COMMENCED
+ # COMMENCED_AND_NOT_COMPLETED
+ # PARTIAL_RESULT<Partial_Result_Type>
+ # COMPLETED<Result_Type>
+ # UNCANCELLABLE
+ # NOT_IMMEDIATELY_DETERMINABLE
+ #
+ # Notice how giving the cancel method the right semantics obviates most
+ # reasons for this method to exist.
+ @abc.abstractmethod
+ def cancelled(self):
+ """Describes whether the computation was cancelled.
This method does not block.
@@ -120,11 +119,11 @@ class Future(six.with_metaclass(abc.ABCMeta)):
not limited to this object's cancel method not having been called and
the computation's result having become immediately available.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def running(self):
- """Describes whether the computation is taking place.
+ @abc.abstractmethod
+ def running(self):
+ """Describes whether the computation is taking place.
This method does not block.
@@ -133,15 +132,15 @@ class Future(six.with_metaclass(abc.ABCMeta)):
taking place now, or False if the computation took place in the past or
was cancelled.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- # NOTE(nathaniel): These aren't quite the semantics I'd like here either. I
- # would rather this only returned True in cases in which the underlying
- # computation completed successfully. A computation's having been cancelled
- # conflicts with considering that computation "done".
- @abc.abstractmethod
- def done(self):
- """Describes whether the computation has taken place.
+ # NOTE(nathaniel): These aren't quite the semantics I'd like here either. I
+ # would rather this only returned True in cases in which the underlying
+ # computation completed successfully. A computation's having been cancelled
+ # conflicts with considering that computation "done".
+ @abc.abstractmethod
+ def done(self):
+ """Describes whether the computation has taken place.
This method does not block.
@@ -150,11 +149,11 @@ class Future(six.with_metaclass(abc.ABCMeta)):
unscheduled or interrupted. False if the computation may possibly be
executing or scheduled to execute later.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def result(self, timeout=None):
- """Accesses the outcome of the computation or raises its exception.
+ @abc.abstractmethod
+ def result(self, timeout=None):
+ """Accesses the outcome of the computation or raises its exception.
This method may return immediately or may block.
@@ -173,11 +172,11 @@ class Future(six.with_metaclass(abc.ABCMeta)):
Exception: If the computation raised an exception, this call will raise
the same exception.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def exception(self, timeout=None):
- """Return the exception raised by the computation.
+ @abc.abstractmethod
+ def exception(self, timeout=None):
+ """Return the exception raised by the computation.
This method may return immediately or may block.
@@ -196,11 +195,11 @@ class Future(six.with_metaclass(abc.ABCMeta)):
terminate within the allotted time.
CancelledError: If the computation was cancelled.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def traceback(self, timeout=None):
- """Access the traceback of the exception raised by the computation.
+ @abc.abstractmethod
+ def traceback(self, timeout=None):
+ """Access the traceback of the exception raised by the computation.
This method may return immediately or may block.
@@ -219,11 +218,11 @@ class Future(six.with_metaclass(abc.ABCMeta)):
terminate within the allotted time.
CancelledError: If the computation was cancelled.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def add_done_callback(self, fn):
- """Adds a function to be called at completion of the computation.
+ @abc.abstractmethod
+ def add_done_callback(self, fn):
+ """Adds a function to be called at completion of the computation.
The callback will be passed this Future object describing the outcome of
the computation.
@@ -234,4 +233,4 @@ class Future(six.with_metaclass(abc.ABCMeta)):
Args:
fn: A callable taking this Future object as its single parameter.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
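Illustrative use of the interface above through the beta utilities' ready future; the address is an assumption, and no server needs to be running for the callback path shown here.

from grpc.beta import implementations
from grpc.beta import utilities

def on_done(f):
    print('cancelled' if f.cancelled() else 'ready')

channel = implementations.insecure_channel('localhost', 50051)
ready = utilities.channel_ready_future(channel)
ready.add_done_callback(on_done)
ready.cancel()  # the callback fires with f.cancelled() == True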
diff --git a/src/python/grpcio/grpc/framework/foundation/logging_pool.py b/src/python/grpcio/grpc/framework/foundation/logging_pool.py
index 9b469a1452..9164173d34 100644
--- a/src/python/grpcio/grpc/framework/foundation/logging_pool.py
+++ b/src/python/grpcio/grpc/framework/foundation/logging_pool.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""A thread pool that logs exceptions raised by tasks executed within it."""
import logging
@@ -35,42 +34,46 @@ from concurrent import futures
def _wrap(behavior):
- """Wraps an arbitrary callable behavior in exception-logging."""
- def _wrapping(*args, **kwargs):
- try:
- return behavior(*args, **kwargs)
- except Exception as e:
- logging.exception(
- 'Unexpected exception from %s executed in logging pool!', behavior)
- raise
- return _wrapping
+ """Wraps an arbitrary callable behavior in exception-logging."""
+
+ def _wrapping(*args, **kwargs):
+ try:
+ return behavior(*args, **kwargs)
+ except Exception as e:
+ logging.exception(
+ 'Unexpected exception from %s executed in logging pool!',
+ behavior)
+ raise
+
+ return _wrapping
class _LoggingPool(object):
- """An exception-logging futures.ThreadPoolExecutor-compatible thread pool."""
+ """An exception-logging futures.ThreadPoolExecutor-compatible thread pool."""
- def __init__(self, backing_pool):
- self._backing_pool = backing_pool
+ def __init__(self, backing_pool):
+ self._backing_pool = backing_pool
- def __enter__(self):
- return self
+ def __enter__(self):
+ return self
- def __exit__(self, exc_type, exc_val, exc_tb):
- self._backing_pool.shutdown(wait=True)
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._backing_pool.shutdown(wait=True)
- def submit(self, fn, *args, **kwargs):
- return self._backing_pool.submit(_wrap(fn), *args, **kwargs)
+ def submit(self, fn, *args, **kwargs):
+ return self._backing_pool.submit(_wrap(fn), *args, **kwargs)
- def map(self, func, *iterables, **kwargs):
- return self._backing_pool.map(
- _wrap(func), *iterables, timeout=kwargs.get('timeout', None))
+ def map(self, func, *iterables, **kwargs):
+ return self._backing_pool.map(_wrap(func),
+ *iterables,
+ timeout=kwargs.get('timeout', None))
- def shutdown(self, wait=True):
- self._backing_pool.shutdown(wait=wait)
+ def shutdown(self, wait=True):
+ self._backing_pool.shutdown(wait=wait)
def pool(max_workers):
- """Creates a thread pool that logs exceptions raised by the tasks within it.
+ """Creates a thread pool that logs exceptions raised by the tasks within it.
Args:
max_workers: The maximum number of worker threads to allow the pool.
@@ -79,4 +82,4 @@ def pool(max_workers):
A futures.ThreadPoolExecutor-compatible thread pool that logs exceptions
raised by the tasks executed within it.
"""
- return _LoggingPool(futures.ThreadPoolExecutor(max_workers))
+ return _LoggingPool(futures.ThreadPoolExecutor(max_workers))
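A minimal sketch of the pool helper above.

from grpc.framework.foundation import logging_pool

with logging_pool.pool(4) as worker_pool:
    result_future = worker_pool.submit(lambda: 2 + 2)
    print(result_future.result())  # 4; an exception raised by the task would be logged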
diff --git a/src/python/grpcio/grpc/framework/foundation/stream.py b/src/python/grpcio/grpc/framework/foundation/stream.py
index ddd6cc496a..2529a6944b 100644
--- a/src/python/grpcio/grpc/framework/foundation/stream.py
+++ b/src/python/grpcio/grpc/framework/foundation/stream.py
@@ -26,35 +26,35 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Interfaces related to streams of values or objects."""
import abc
import six
+
class Consumer(six.with_metaclass(abc.ABCMeta)):
- """Interface for consumers of finite streams of values or objects."""
+ """Interface for consumers of finite streams of values or objects."""
- @abc.abstractmethod
- def consume(self, value):
- """Accepts a value.
+ @abc.abstractmethod
+ def consume(self, value):
+ """Accepts a value.
Args:
value: Any value accepted by this Consumer.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def terminate(self):
- """Indicates to this Consumer that no more values will be supplied."""
- raise NotImplementedError()
+ @abc.abstractmethod
+ def terminate(self):
+ """Indicates to this Consumer that no more values will be supplied."""
+ raise NotImplementedError()
- @abc.abstractmethod
- def consume_and_terminate(self, value):
- """Supplies a value and signals that no more values will be supplied.
+ @abc.abstractmethod
+ def consume_and_terminate(self, value):
+ """Supplies a value and signals that no more values will be supplied.
Args:
value: Any value accepted by this Consumer.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
diff --git a/src/python/grpcio/grpc/framework/foundation/stream_util.py b/src/python/grpcio/grpc/framework/foundation/stream_util.py
index a6f234f1fe..6b356f176f 100644
--- a/src/python/grpcio/grpc/framework/foundation/stream_util.py
+++ b/src/python/grpcio/grpc/framework/foundation/stream_util.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Helpful utilities related to the stream module."""
import logging
@@ -38,126 +37,126 @@ _NO_VALUE = object()
class TransformingConsumer(stream.Consumer):
- """A stream.Consumer that passes a transformation of its input to another."""
+ """A stream.Consumer that passes a transformation of its input to another."""
- def __init__(self, transformation, downstream):
- self._transformation = transformation
- self._downstream = downstream
+ def __init__(self, transformation, downstream):
+ self._transformation = transformation
+ self._downstream = downstream
- def consume(self, value):
- self._downstream.consume(self._transformation(value))
+ def consume(self, value):
+ self._downstream.consume(self._transformation(value))
- def terminate(self):
- self._downstream.terminate()
+ def terminate(self):
+ self._downstream.terminate()
- def consume_and_terminate(self, value):
- self._downstream.consume_and_terminate(self._transformation(value))
+ def consume_and_terminate(self, value):
+ self._downstream.consume_and_terminate(self._transformation(value))
class IterableConsumer(stream.Consumer):
- """A Consumer that when iterated over emits the values it has consumed."""
-
- def __init__(self):
- self._condition = threading.Condition()
- self._values = []
- self._active = True
-
- def consume(self, stock_reply):
- with self._condition:
- if self._active:
- self._values.append(stock_reply)
- self._condition.notify()
-
- def terminate(self):
- with self._condition:
- self._active = False
- self._condition.notify()
-
- def consume_and_terminate(self, stock_reply):
- with self._condition:
- if self._active:
- self._values.append(stock_reply)
- self._active = False
- self._condition.notify()
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return self.next()
-
- def next(self):
- with self._condition:
- while self._active and not self._values:
- self._condition.wait()
- if self._values:
- return self._values.pop(0)
- else:
- raise StopIteration()
+ """A Consumer that when iterated over emits the values it has consumed."""
+
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._values = []
+ self._active = True
+
+ def consume(self, stock_reply):
+ with self._condition:
+ if self._active:
+ self._values.append(stock_reply)
+ self._condition.notify()
+
+ def terminate(self):
+ with self._condition:
+ self._active = False
+ self._condition.notify()
+
+ def consume_and_terminate(self, stock_reply):
+ with self._condition:
+ if self._active:
+ self._values.append(stock_reply)
+ self._active = False
+ self._condition.notify()
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self.next()
+
+ def next(self):
+ with self._condition:
+ while self._active and not self._values:
+ self._condition.wait()
+ if self._values:
+ return self._values.pop(0)
+ else:
+ raise StopIteration()
class ThreadSwitchingConsumer(stream.Consumer):
- """A Consumer decorator that affords serialization and asynchrony."""
-
- def __init__(self, sink, pool):
- self._lock = threading.Lock()
- self._sink = sink
- self._pool = pool
- # True if self._spin has been submitted to the pool to be called once and
- # that call has not yet returned, False otherwise.
- self._spinning = False
- self._values = []
- self._active = True
-
- def _spin(self, sink, value, terminate):
- while True:
- try:
- if value is _NO_VALUE:
- sink.terminate()
- elif terminate:
- sink.consume_and_terminate(value)
- else:
- sink.consume(value)
- except Exception as e: # pylint:disable=broad-except
- logging.exception(e)
-
- with self._lock:
- if terminate:
- self._spinning = False
- return
- elif self._values:
- value = self._values.pop(0)
- terminate = not self._values and not self._active
- elif not self._active:
- value = _NO_VALUE
- terminate = True
- else:
- self._spinning = False
- return
-
- def consume(self, value):
- with self._lock:
- if self._active:
- if self._spinning:
- self._values.append(value)
- else:
- self._pool.submit(self._spin, self._sink, value, False)
- self._spinning = True
-
- def terminate(self):
- with self._lock:
- if self._active:
- self._active = False
- if not self._spinning:
- self._pool.submit(self._spin, self._sink, _NO_VALUE, True)
- self._spinning = True
-
- def consume_and_terminate(self, value):
- with self._lock:
- if self._active:
- self._active = False
- if self._spinning:
- self._values.append(value)
- else:
- self._pool.submit(self._spin, self._sink, value, True)
- self._spinning = True
+ """A Consumer decorator that affords serialization and asynchrony."""
+
+ def __init__(self, sink, pool):
+ self._lock = threading.Lock()
+ self._sink = sink
+ self._pool = pool
+ # True if self._spin has been submitted to the pool to be called once and
+ # that call has not yet returned, False otherwise.
+ self._spinning = False
+ self._values = []
+ self._active = True
+
+ def _spin(self, sink, value, terminate):
+ while True:
+ try:
+ if value is _NO_VALUE:
+ sink.terminate()
+ elif terminate:
+ sink.consume_and_terminate(value)
+ else:
+ sink.consume(value)
+ except Exception as e: # pylint:disable=broad-except
+ logging.exception(e)
+
+ with self._lock:
+ if terminate:
+ self._spinning = False
+ return
+ elif self._values:
+ value = self._values.pop(0)
+ terminate = not self._values and not self._active
+ elif not self._active:
+ value = _NO_VALUE
+ terminate = True
+ else:
+ self._spinning = False
+ return
+
+ def consume(self, value):
+ with self._lock:
+ if self._active:
+ if self._spinning:
+ self._values.append(value)
+ else:
+ self._pool.submit(self._spin, self._sink, value, False)
+ self._spinning = True
+
+ def terminate(self):
+ with self._lock:
+ if self._active:
+ self._active = False
+ if not self._spinning:
+ self._pool.submit(self._spin, self._sink, _NO_VALUE, True)
+ self._spinning = True
+
+ def consume_and_terminate(self, value):
+ with self._lock:
+ if self._active:
+ self._active = False
+ if self._spinning:
+ self._values.append(value)
+ else:
+ self._pool.submit(self._spin, self._sink, value, True)
+ self._spinning = True
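A small single-threaded sketch of IterableConsumer above; in practice the producer and the iterating reader usually run on different threads.

from grpc.framework.foundation import stream_util

consumer = stream_util.IterableConsumer()
consumer.consume('first')
consumer.consume_and_terminate('last')
print(list(consumer))  # ['first', 'last']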
diff --git a/src/python/grpcio/grpc/framework/interfaces/__init__.py b/src/python/grpcio/grpc/framework/interfaces/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio/grpc/framework/interfaces/__init__.py
+++ b/src/python/grpcio/grpc/framework/interfaces/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio/grpc/framework/interfaces/base/__init__.py b/src/python/grpcio/grpc/framework/interfaces/base/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio/grpc/framework/interfaces/base/__init__.py
+++ b/src/python/grpcio/grpc/framework/interfaces/base/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio/grpc/framework/interfaces/base/base.py b/src/python/grpcio/grpc/framework/interfaces/base/base.py
index a2ddd9c474..cb3328296c 100644
--- a/src/python/grpcio/grpc/framework/interfaces/base/base.py
+++ b/src/python/grpcio/grpc/framework/interfaces/base/base.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""The base interface of RPC Framework.
Implementations of this interface support the conduct of "operations":
@@ -49,7 +48,7 @@ from grpc.framework.foundation import abandonment # pylint: disable=unused-impo
class NoSuchMethodError(Exception):
- """Indicates that an unrecognized operation has been called.
+ """Indicates that an unrecognized operation has been called.
Attributes:
code: A code value to communicate to the other side of the operation along
@@ -58,8 +57,8 @@ class NoSuchMethodError(Exception):
along with indication of operation termination. May be None.
"""
- def __init__(self, code, details):
- """Constructor.
+ def __init__(self, code, details):
+ """Constructor.
Args:
code: A code value to communicate to the other side of the operation
@@ -67,12 +66,12 @@ class NoSuchMethodError(Exception):
details: A details value to communicate to the other side of the
operation along with indication of operation termination. May be None.
"""
- self.code = code
- self.details = details
+ self.code = code
+ self.details = details
class Outcome(object):
- """The outcome of an operation.
+ """The outcome of an operation.
Attributes:
kind: A Kind value coarsely identifying how the operation terminated.
@@ -82,23 +81,23 @@ class Outcome(object):
provided.
"""
- @enum.unique
- class Kind(enum.Enum):
- """Ways in which an operation can terminate."""
+ @enum.unique
+ class Kind(enum.Enum):
+ """Ways in which an operation can terminate."""
- COMPLETED = 'completed'
- CANCELLED = 'cancelled'
- EXPIRED = 'expired'
- LOCAL_SHUTDOWN = 'local shutdown'
- REMOTE_SHUTDOWN = 'remote shutdown'
- RECEPTION_FAILURE = 'reception failure'
- TRANSMISSION_FAILURE = 'transmission failure'
- LOCAL_FAILURE = 'local failure'
- REMOTE_FAILURE = 'remote failure'
+ COMPLETED = 'completed'
+ CANCELLED = 'cancelled'
+ EXPIRED = 'expired'
+ LOCAL_SHUTDOWN = 'local shutdown'
+ REMOTE_SHUTDOWN = 'remote shutdown'
+ RECEPTION_FAILURE = 'reception failure'
+ TRANSMISSION_FAILURE = 'transmission failure'
+ LOCAL_FAILURE = 'local failure'
+ REMOTE_FAILURE = 'remote failure'
class Completion(six.with_metaclass(abc.ABCMeta)):
- """An aggregate of the values exchanged upon operation completion.
+ """An aggregate of the values exchanged upon operation completion.
Attributes:
terminal_metadata: A terminal metadata value for the operation.
@@ -108,21 +107,21 @@ class Completion(six.with_metaclass(abc.ABCMeta)):
class OperationContext(six.with_metaclass(abc.ABCMeta)):
- """Provides operation-related information and action."""
+ """Provides operation-related information and action."""
- @abc.abstractmethod
- def outcome(self):
- """Indicates the operation's outcome (or that the operation is ongoing).
+ @abc.abstractmethod
+ def outcome(self):
+ """Indicates the operation's outcome (or that the operation is ongoing).
Returns:
None if the operation is still active or the Outcome value for the
operation if it has terminated.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def add_termination_callback(self, callback):
- """Adds a function to be called upon operation termination.
+ @abc.abstractmethod
+ def add_termination_callback(self, callback):
+ """Adds a function to be called upon operation termination.
Args:
callback: A callable to be passed an Outcome value on operation
@@ -134,42 +133,44 @@ class OperationContext(six.with_metaclass(abc.ABCMeta)):
terminated an Outcome value describing the operation termination and the
passed callback will not be called as a result of this method call.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def time_remaining(self):
- """Describes the length of allowed time remaining for the operation.
+ @abc.abstractmethod
+ def time_remaining(self):
+ """Describes the length of allowed time remaining for the operation.
Returns:
A nonnegative float indicating the length of allowed time in seconds
remaining for the operation to complete before it is considered to have
timed out. Zero is returned if the operation has terminated.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def cancel(self):
- """Cancels the operation if the operation has not yet terminated."""
- raise NotImplementedError()
+ @abc.abstractmethod
+ def cancel(self):
+ """Cancels the operation if the operation has not yet terminated."""
+ raise NotImplementedError()
- @abc.abstractmethod
- def fail(self, exception):
- """Indicates that the operation has failed.
+ @abc.abstractmethod
+ def fail(self, exception):
+ """Indicates that the operation has failed.
Args:
exception: An exception germane to the operation failure. May be None.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class Operator(six.with_metaclass(abc.ABCMeta)):
- """An interface through which to participate in an operation."""
+ """An interface through which to participate in an operation."""
- @abc.abstractmethod
- def advance(
- self, initial_metadata=None, payload=None, completion=None,
- allowance=None):
- """Progresses the operation.
+ @abc.abstractmethod
+ def advance(self,
+ initial_metadata=None,
+ payload=None,
+ completion=None,
+ allowance=None):
+ """Progresses the operation.
Args:
initial_metadata: An initial metadata value. Only one may ever be
@@ -181,23 +182,24 @@ class Operator(six.with_metaclass(abc.ABCMeta)):
allowance: A positive integer communicating the number of additional
payloads allowed to be passed by the remote side of the operation.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
+
class ProtocolReceiver(six.with_metaclass(abc.ABCMeta)):
- """A means of receiving protocol values during an operation."""
+ """A means of receiving protocol values during an operation."""
- @abc.abstractmethod
- def context(self, protocol_context):
- """Accepts the protocol context object for the operation.
+ @abc.abstractmethod
+ def context(self, protocol_context):
+ """Accepts the protocol context object for the operation.
Args:
protocol_context: The protocol context object for the operation.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class Subscription(six.with_metaclass(abc.ABCMeta)):
- """Describes customer code's interest in values from the other side.
+ """Describes customer code's interest in values from the other side.
Attributes:
kind: A Kind value describing the overall kind of this value.
@@ -215,20 +217,20 @@ class Subscription(six.with_metaclass(abc.ABCMeta)):
Kind.FULL.
"""
- @enum.unique
- class Kind(enum.Enum):
+ @enum.unique
+ class Kind(enum.Enum):
- NONE = 'none'
- TERMINATION_ONLY = 'termination only'
- FULL = 'full'
+ NONE = 'none'
+ TERMINATION_ONLY = 'termination only'
+ FULL = 'full'
class Servicer(six.with_metaclass(abc.ABCMeta)):
- """Interface for service implementations."""
+ """Interface for service implementations."""
- @abc.abstractmethod
- def service(self, group, method, context, output_operator):
- """Services an operation.
+ @abc.abstractmethod
+ def service(self, group, method, context, output_operator):
+ """Services an operation.
Args:
group: The group identifier of the operation to be serviced.
@@ -248,20 +250,20 @@ class Servicer(six.with_metaclass(abc.ABCMeta)):
abandonment.Abandoned: If the operation has been aborted and there no
longer is any reason to service the operation.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class End(six.with_metaclass(abc.ABCMeta)):
- """Common type for entry-point objects on both sides of an operation."""
+ """Common type for entry-point objects on both sides of an operation."""
- @abc.abstractmethod
- def start(self):
- """Starts this object's service of operations."""
- raise NotImplementedError()
+ @abc.abstractmethod
+ def start(self):
+ """Starts this object's service of operations."""
+ raise NotImplementedError()
- @abc.abstractmethod
- def stop(self, grace):
- """Stops this object's service of operations.
+ @abc.abstractmethod
+ def stop(self, grace):
+ """Stops this object's service of operations.
This object will refuse service of new operations as soon as this method is
called but operations under way at the time of the call may be given a
@@ -281,13 +283,19 @@ class End(six.with_metaclass(abc.ABCMeta)):
much sooner (if for example this End had no operations in progress at
the time its stop method was called).
"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def operate(
- self, group, method, subscription, timeout, initial_metadata=None,
- payload=None, completion=None, protocol_options=None):
- """Commences an operation.
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def operate(self,
+ group,
+ method,
+ subscription,
+ timeout,
+ initial_metadata=None,
+ payload=None,
+ completion=None,
+ protocol_options=None):
+ """Commences an operation.
Args:
group: The group identifier of the invoked operation.
@@ -312,23 +320,23 @@ class End(six.with_metaclass(abc.ABCMeta)):
returned pair is an Operator to which operation values not passed in
this call should later be passed.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def operation_stats(self):
- """Reports the number of terminated operations broken down by outcome.
+ @abc.abstractmethod
+ def operation_stats(self):
+ """Reports the number of terminated operations broken down by outcome.
Returns:
A dictionary from Outcome.Kind value to an integer identifying the number
of operations that terminated with that outcome kind.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def add_idle_action(self, action):
- """Adds an action to be called when this End has no ongoing operations.
+ @abc.abstractmethod
+ def add_idle_action(self, action):
+ """Adds an action to be called when this End has no ongoing operations.
Args:
action: A callable that accepts no arguments.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
diff --git a/src/python/grpcio/grpc/framework/interfaces/base/utilities.py b/src/python/grpcio/grpc/framework/interfaces/base/utilities.py
index 87a85018f5..461706ff9f 100644
--- a/src/python/grpcio/grpc/framework/interfaces/base/utilities.py
+++ b/src/python/grpcio/grpc/framework/interfaces/base/utilities.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Utilities for use with the base interface of RPC Framework."""
import collections
@@ -34,27 +33,30 @@ import collections
from grpc.framework.interfaces.base import base
-class _Completion(
- base.Completion,
- collections.namedtuple(
- '_Completion', ('terminal_metadata', 'code', 'message',))):
- """A trivial implementation of base.Completion."""
+class _Completion(base.Completion,
+ collections.namedtuple('_Completion', (
+ 'terminal_metadata',
+ 'code',
+ 'message',))):
+ """A trivial implementation of base.Completion."""
+
+class _Subscription(base.Subscription,
+ collections.namedtuple('_Subscription', (
+ 'kind',
+ 'termination_callback',
+ 'allowance',
+ 'operator',
+ 'protocol_receiver',))):
+ """A trivial implementation of base.Subscription."""
-class _Subscription(
- base.Subscription,
- collections.namedtuple(
- '_Subscription',
- ('kind', 'termination_callback', 'allowance', 'operator',
- 'protocol_receiver',))):
- """A trivial implementation of base.Subscription."""
-_NONE_SUBSCRIPTION = _Subscription(
- base.Subscription.Kind.NONE, None, None, None, None)
+_NONE_SUBSCRIPTION = _Subscription(base.Subscription.Kind.NONE, None, None,
+ None, None)
def completion(terminal_metadata, code, message):
- """Creates a base.Completion aggregating the given operation values.
+ """Creates a base.Completion aggregating the given operation values.
Args:
terminal_metadata: A terminal metadata value for an operation.
@@ -64,11 +66,11 @@ def completion(terminal_metadata, code, message):
Returns:
A base.Completion aggregating the given operation values.
"""
- return _Completion(terminal_metadata, code, message)
+ return _Completion(terminal_metadata, code, message)
def full_subscription(operator, protocol_receiver):
- """Creates a "full" base.Subscription for the given base.Operator.
+ """Creates a "full" base.Subscription for the given base.Operator.
Args:
operator: A base.Operator to be used in an operation.
@@ -78,5 +80,5 @@ def full_subscription(operator, protocol_receiver):
A base.Subscription of kind base.Subscription.Kind.FULL wrapping the given
base.Operator and base.ProtocolReceiver.
"""
- return _Subscription(
- base.Subscription.Kind.FULL, None, None, operator, protocol_receiver)
+ return _Subscription(base.Subscription.Kind.FULL, None, None, operator,
+ protocol_receiver)
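A trivial sketch of the completion helper above.

from grpc.framework.interfaces.base import utilities

termination = utilities.completion(terminal_metadata=None, code=None, message='OK')
print(termination)  # _Completion(terminal_metadata=None, code=None, message='OK')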
diff --git a/src/python/grpcio/grpc/framework/interfaces/face/__init__.py b/src/python/grpcio/grpc/framework/interfaces/face/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio/grpc/framework/interfaces/face/__init__.py
+++ b/src/python/grpcio/grpc/framework/interfaces/face/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio/grpc/framework/interfaces/face/face.py b/src/python/grpcio/grpc/framework/interfaces/face/face.py
index 4826e7fff6..36ddca18c1 100644
--- a/src/python/grpcio/grpc/framework/interfaces/face/face.py
+++ b/src/python/grpcio/grpc/framework/interfaces/face/face.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Interfaces defining the Face layer of RPC Framework."""
import abc
@@ -45,33 +44,38 @@ from grpc.framework.foundation import stream # pylint: disable=unused-import
class NoSuchMethodError(Exception):
- """Raised by customer code to indicate an unrecognized method.
+ """Raised by customer code to indicate an unrecognized method.
Attributes:
group: The group of the unrecognized method.
method: The name of the unrecognized method.
"""
- def __init__(self, group, method):
- """Constructor.
+ def __init__(self, group, method):
+ """Constructor.
Args:
group: The group identifier of the unrecognized RPC name.
method: The method identifier of the unrecognized RPC name.
"""
- super(NoSuchMethodError, self).__init__()
- self.group = group
- self.method = method
+ super(NoSuchMethodError, self).__init__()
+ self.group = group
+ self.method = method
- def __repr__(self):
- return 'face.NoSuchMethodError(%s, %s)' % (self.group, self.method,)
+ def __repr__(self):
+ return 'face.NoSuchMethodError(%s, %s)' % (
+ self.group,
+ self.method,)
class Abortion(
- collections.namedtuple(
- 'Abortion',
- ('kind', 'initial_metadata', 'terminal_metadata', 'code', 'details',))):
- """A value describing RPC abortion.
+ collections.namedtuple('Abortion', (
+ 'kind',
+ 'initial_metadata',
+ 'terminal_metadata',
+ 'code',
+ 'details',))):
+ """A value describing RPC abortion.
Attributes:
kind: A Kind value identifying how the RPC failed.
@@ -85,21 +89,21 @@ class Abortion(
details value was received.
"""
- @enum.unique
- class Kind(enum.Enum):
- """Types of RPC abortion."""
+ @enum.unique
+ class Kind(enum.Enum):
+ """Types of RPC abortion."""
- CANCELLED = 'cancelled'
- EXPIRED = 'expired'
- LOCAL_SHUTDOWN = 'local shutdown'
- REMOTE_SHUTDOWN = 'remote shutdown'
- NETWORK_FAILURE = 'network failure'
- LOCAL_FAILURE = 'local failure'
- REMOTE_FAILURE = 'remote failure'
+ CANCELLED = 'cancelled'
+ EXPIRED = 'expired'
+ LOCAL_SHUTDOWN = 'local shutdown'
+ REMOTE_SHUTDOWN = 'remote shutdown'
+ NETWORK_FAILURE = 'network failure'
+ LOCAL_FAILURE = 'local failure'
+ REMOTE_FAILURE = 'remote failure'
class AbortionError(six.with_metaclass(abc.ABCMeta, Exception)):
- """Common super type for exceptions indicating RPC abortion.
+ """Common super type for exceptions indicating RPC abortion.
initial_metadata: The initial metadata from the other side of the RPC or
None if no initial metadata value was received.
@@ -111,100 +115,100 @@ class AbortionError(six.with_metaclass(abc.ABCMeta, Exception)):
details value was received.
"""
- def __init__(self, initial_metadata, terminal_metadata, code, details):
- super(AbortionError, self).__init__()
- self.initial_metadata = initial_metadata
- self.terminal_metadata = terminal_metadata
- self.code = code
- self.details = details
+ def __init__(self, initial_metadata, terminal_metadata, code, details):
+ super(AbortionError, self).__init__()
+ self.initial_metadata = initial_metadata
+ self.terminal_metadata = terminal_metadata
+ self.code = code
+ self.details = details
- def __str__(self):
- return '%s(code=%s, details="%s")' % (
- self.__class__.__name__, self.code, self.details)
+ def __str__(self):
+ return '%s(code=%s, details="%s")' % (self.__class__.__name__,
+ self.code, self.details)
class CancellationError(AbortionError):
- """Indicates that an RPC has been cancelled."""
+ """Indicates that an RPC has been cancelled."""
class ExpirationError(AbortionError):
- """Indicates that an RPC has expired ("timed out")."""
+ """Indicates that an RPC has expired ("timed out")."""
class LocalShutdownError(AbortionError):
- """Indicates that an RPC has terminated due to local shutdown of RPCs."""
+ """Indicates that an RPC has terminated due to local shutdown of RPCs."""
class RemoteShutdownError(AbortionError):
- """Indicates that an RPC has terminated due to remote shutdown of RPCs."""
+ """Indicates that an RPC has terminated due to remote shutdown of RPCs."""
class NetworkError(AbortionError):
- """Indicates that some error occurred on the network."""
+ """Indicates that some error occurred on the network."""
class LocalError(AbortionError):
- """Indicates that an RPC has terminated due to a local defect."""
+ """Indicates that an RPC has terminated due to a local defect."""
class RemoteError(AbortionError):
- """Indicates that an RPC has terminated due to a remote defect."""
+ """Indicates that an RPC has terminated due to a remote defect."""
class RpcContext(six.with_metaclass(abc.ABCMeta)):
- """Provides RPC-related information and action."""
+ """Provides RPC-related information and action."""
- @abc.abstractmethod
- def is_active(self):
- """Describes whether the RPC is active or has terminated."""
- raise NotImplementedError()
+ @abc.abstractmethod
+ def is_active(self):
+ """Describes whether the RPC is active or has terminated."""
+ raise NotImplementedError()
- @abc.abstractmethod
- def time_remaining(self):
- """Describes the length of allowed time remaining for the RPC.
+ @abc.abstractmethod
+ def time_remaining(self):
+ """Describes the length of allowed time remaining for the RPC.
Returns:
A nonnegative float indicating the length of allowed time in seconds
remaining for the RPC to complete before it is considered to have timed
out.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def add_abortion_callback(self, abortion_callback):
- """Registers a callback to be called if the RPC is aborted.
+ @abc.abstractmethod
+ def add_abortion_callback(self, abortion_callback):
+ """Registers a callback to be called if the RPC is aborted.
Args:
abortion_callback: A callable to be called and passed an Abortion value
in the event of RPC abortion.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def cancel(self):
- """Cancels the RPC.
+ @abc.abstractmethod
+ def cancel(self):
+ """Cancels the RPC.
Idempotent and has no effect if the RPC has already terminated.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def protocol_context(self):
- """Accesses a custom object specified by an implementation provider.
+ @abc.abstractmethod
+ def protocol_context(self):
+ """Accesses a custom object specified by an implementation provider.
Returns:
A value specified by the provider of a Face interface implementation
affording custom state and behavior.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
- """Invocation-side utility object for an RPC."""
+ """Invocation-side utility object for an RPC."""
- @abc.abstractmethod
- def initial_metadata(self):
- """Accesses the initial metadata from the service-side of the RPC.
+ @abc.abstractmethod
+ def initial_metadata(self):
+ """Accesses the initial metadata from the service-side of the RPC.
This method blocks until the value is available or is known not to have been
emitted from the service-side of the RPC.
@@ -213,11 +217,11 @@ class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
The initial metadata object emitted by the service-side of the RPC, or
None if there was no such value.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def terminal_metadata(self):
- """Accesses the terminal metadata from the service-side of the RPC.
+ @abc.abstractmethod
+ def terminal_metadata(self):
+ """Accesses the terminal metadata from the service-side of the RPC.
This method blocks until the value is available or is known not to have been
emitted from the service-side of the RPC.
@@ -226,11 +230,11 @@ class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
The terminal metadata object emitted by the service-side of the RPC, or
None if there was no such value.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def code(self):
- """Accesses the code emitted by the service-side of the RPC.
+ @abc.abstractmethod
+ def code(self):
+ """Accesses the code emitted by the service-side of the RPC.
This method blocks until the value is available or is known not to have been
emitted from the service-side of the RPC.
@@ -239,11 +243,11 @@ class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
The code object emitted by the service-side of the RPC, or None if there
was no such value.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def details(self):
- """Accesses the details value emitted by the service-side of the RPC.
+ @abc.abstractmethod
+ def details(self):
+ """Accesses the details value emitted by the service-side of the RPC.
This method blocks until the value is available or is known not to have been
emitted from the service-side of the RPC.
@@ -252,15 +256,15 @@ class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
The details value emitted by the service-side of the RPC, or None if there
was no such value.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
- """A context object passed to method implementations."""
+ """A context object passed to method implementations."""
- @abc.abstractmethod
- def invocation_metadata(self):
- """Accesses the metadata from the invocation-side of the RPC.
+ @abc.abstractmethod
+ def invocation_metadata(self):
+ """Accesses the metadata from the invocation-side of the RPC.
This method blocks until the value is available or is known not to have been
emitted from the invocation-side of the RPC.
@@ -269,11 +273,11 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
The metadata object emitted by the invocation-side of the RPC, or None if
there was no such value.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def initial_metadata(self, initial_metadata):
- """Accepts the service-side initial metadata value of the RPC.
+ @abc.abstractmethod
+ def initial_metadata(self, initial_metadata):
+ """Accepts the service-side initial metadata value of the RPC.
This method need not be called by method implementations if they have no
service-side initial metadata to transmit.
@@ -282,11 +286,11 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
initial_metadata: The service-side initial metadata value of the RPC to
be transmitted to the invocation side of the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def terminal_metadata(self, terminal_metadata):
- """Accepts the service-side terminal metadata value of the RPC.
+ @abc.abstractmethod
+ def terminal_metadata(self, terminal_metadata):
+ """Accepts the service-side terminal metadata value of the RPC.
This method need not be called by method implementations if they have no
service-side terminal metadata to transmit.
@@ -295,11 +299,11 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
terminal_metadata: The service-side terminal metadata value of the RPC to
be transmitted to the invocation side of the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def code(self, code):
- """Accepts the service-side code of the RPC.
+ @abc.abstractmethod
+ def code(self, code):
+ """Accepts the service-side code of the RPC.
This method need not be called by method implementations if they have no
code to transmit.
@@ -308,11 +312,11 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
code: The code of the RPC to be transmitted to the invocation side of the
RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def details(self, details):
- """Accepts the service-side details of the RPC.
+ @abc.abstractmethod
+ def details(self, details):
+ """Accepts the service-side details of the RPC.
This method need not be called by method implementations if they have no
service-side details to transmit.
@@ -321,34 +325,34 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
details: The service-side details value of the RPC to be transmitted to
the invocation side of the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class ResponseReceiver(six.with_metaclass(abc.ABCMeta)):
- """Invocation-side object used to accept the output of an RPC."""
+ """Invocation-side object used to accept the output of an RPC."""
- @abc.abstractmethod
- def initial_metadata(self, initial_metadata):
- """Receives the initial metadata from the service-side of the RPC.
+ @abc.abstractmethod
+ def initial_metadata(self, initial_metadata):
+ """Receives the initial metadata from the service-side of the RPC.
Args:
initial_metadata: The initial metadata object emitted from the
service-side of the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def response(self, response):
- """Receives a response from the service-side of the RPC.
+ @abc.abstractmethod
+ def response(self, response):
+ """Receives a response from the service-side of the RPC.
Args:
response: A response object emitted from the service-side of the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def complete(self, terminal_metadata, code, details):
- """Receives the completion values emitted from the service-side of the RPC.
+ @abc.abstractmethod
+ def complete(self, terminal_metadata, code, details):
+ """Receives the completion values emitted from the service-side of the RPC.
Args:
terminal_metadata: The terminal metadata object emitted from the
@@ -356,17 +360,20 @@ class ResponseReceiver(six.with_metaclass(abc.ABCMeta)):
code: The code object emitted from the service-side of the RPC.
details: The details object emitted from the service-side of the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
- """Affords invoking a unary-unary RPC in any call style."""
+ """Affords invoking a unary-unary RPC in any call style."""
- @abc.abstractmethod
- def __call__(
- self, request, timeout, metadata=None, with_call=False,
- protocol_options=None):
- """Synchronously invokes the underlying RPC.
+ @abc.abstractmethod
+ def __call__(self,
+ request,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ """Synchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
@@ -385,11 +392,11 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
Raises:
AbortionError: Indicating that the RPC was aborted.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def future(self, request, timeout, metadata=None, protocol_options=None):
- """Asynchronously invokes the underlying RPC.
+ @abc.abstractmethod
+ def future(self, request, timeout, metadata=None, protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
@@ -405,13 +412,17 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
response value of the RPC. In the event of RPC abortion, the returned
Future's exception value will be an AbortionError.
"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def event(
- self, request, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- """Asynchronously invokes the underlying RPC.
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event(self,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
@@ -427,15 +438,15 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
Returns:
A Call for the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
- """Affords invoking a unary-stream RPC in any call style."""
+ """Affords invoking a unary-stream RPC in any call style."""
- @abc.abstractmethod
- def __call__(self, request, timeout, metadata=None, protocol_options=None):
- """Invokes the underlying RPC.
+ @abc.abstractmethod
+ def __call__(self, request, timeout, metadata=None, protocol_options=None):
+ """Invokes the underlying RPC.
Args:
request: The request value for the RPC.
@@ -450,13 +461,17 @@ class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
values. Drawing response values from the returned iterator may raise
AbortionError indicating abortion of the RPC.
"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def event(
- self, request, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- """Asynchronously invokes the underlying RPC.
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event(self,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
@@ -472,17 +487,20 @@ class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
Returns:
A Call object for the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
- """Affords invoking a stream-unary RPC in any call style."""
+ """Affords invoking a stream-unary RPC in any call style."""
- @abc.abstractmethod
- def __call__(
- self, request_iterator, timeout, metadata=None,
- with_call=False, protocol_options=None):
- """Synchronously invokes the underlying RPC.
+ @abc.abstractmethod
+ def __call__(self,
+ request_iterator,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ """Synchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
@@ -501,12 +519,15 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
Raises:
AbortionError: Indicating that the RPC was aborted.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def future(
- self, request_iterator, timeout, metadata=None, protocol_options=None):
- """Asynchronously invokes the underlying RPC.
+ @abc.abstractmethod
+ def future(self,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
@@ -522,13 +543,16 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
response value of the RPC. In the event of RPC abortion, the returned
Future's exception value will be an AbortionError.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def event(
- self, receiver, abortion_callback, timeout, metadata=None,
- protocol_options=None):
- """Asynchronously invokes the underlying RPC.
+ @abc.abstractmethod
+ def event(self,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
Args:
receiver: A ResponseReceiver to be passed the response data of the RPC.
@@ -544,16 +568,19 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
A single object that is both a Call object for the RPC and a
stream.Consumer to which the request values of the RPC should be passed.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
- """Affords invoking a stream-stream RPC in any call style."""
+ """Affords invoking a stream-stream RPC in any call style."""
- @abc.abstractmethod
- def __call__(
- self, request_iterator, timeout, metadata=None, protocol_options=None):
- """Invokes the underlying RPC.
+ @abc.abstractmethod
+ def __call__(self,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
@@ -568,13 +595,16 @@ class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
values. Drawing response values from the returned iterator may raise
AbortionError indicating abortion of the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def event(
- self, receiver, abortion_callback, timeout, metadata=None,
- protocol_options=None):
- """Asynchronously invokes the underlying RPC.
+ @abc.abstractmethod
+ def event(self,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
Args:
receiver: A ResponseReceiver to be passed the response data of the RPC.
@@ -590,11 +620,11 @@ class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
A single object that is both a Call object for the RPC and a
stream.Consumer to which the request values of the RPC should be passed.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class MethodImplementation(six.with_metaclass(abc.ABCMeta)):
- """A sum type that describes a method implementation.
+ """A sum type that describes a method implementation.
Attributes:
cardinality: A cardinality.Cardinality value.
@@ -639,11 +669,11 @@ class MethodImplementation(six.with_metaclass(abc.ABCMeta)):
class MultiMethodImplementation(six.with_metaclass(abc.ABCMeta)):
- """A general type able to service many methods."""
+ """A general type able to service many methods."""
- @abc.abstractmethod
- def service(self, group, method, response_consumer, context):
- """Services an RPC.
+ @abc.abstractmethod
+ def service(self, group, method, response_consumer, context):
+ """Services an RPC.
Args:
group: The group identifier of the RPC.
@@ -666,17 +696,22 @@ class MultiMethodImplementation(six.with_metaclass(abc.ABCMeta)):
NoSuchMethodError: If this MultiMethod does not recognize the given group
and name for the RPC and is not able to service the RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class GenericStub(six.with_metaclass(abc.ABCMeta)):
- """Affords RPC invocation via generic methods."""
-
- @abc.abstractmethod
- def blocking_unary_unary(
- self, group, method, request, timeout, metadata=None,
- with_call=False, protocol_options=None):
- """Invokes a unary-request-unary-response method.
+ """Affords RPC invocation via generic methods."""
+
+ @abc.abstractmethod
+ def blocking_unary_unary(self,
+ group,
+ method,
+ request,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ """Invokes a unary-request-unary-response method.
This method blocks until either returning the response value of the RPC
(in the event of RPC completion) or raising an exception (in the event of
@@ -700,13 +735,17 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
Raises:
AbortionError: Indicating that the RPC was aborted.
"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def future_unary_unary(
- self, group, method, request, timeout, metadata=None,
- protocol_options=None):
- """Invokes a unary-request-unary-response method.
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future_unary_unary(self,
+ group,
+ method,
+ request,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes a unary-request-unary-response method.
Args:
group: The group identifier of the RPC.
@@ -723,13 +762,17 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
response value of the RPC. In the event of RPC abortion, the returned
Future's exception value will be an AbortionError.
"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def inline_unary_stream(
- self, group, method, request, timeout, metadata=None,
- protocol_options=None):
- """Invokes a unary-request-stream-response method.
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def inline_unary_stream(self,
+ group,
+ method,
+ request,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes a unary-request-stream-response method.
Args:
group: The group identifier of the RPC.
@@ -745,13 +788,18 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
values. Drawing response values from the returned iterator may raise
AbortionError indicating abortion of the RPC.
"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def blocking_stream_unary(
- self, group, method, request_iterator, timeout, metadata=None,
- with_call=False, protocol_options=None):
- """Invokes a stream-request-unary-response method.
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def blocking_stream_unary(self,
+ group,
+ method,
+ request_iterator,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ """Invokes a stream-request-unary-response method.
This method blocks until either returning the response value of the RPC
(in the event of RPC completion) or raising an exception (in the event of
@@ -775,13 +823,17 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
Raises:
AbortionError: Indicating that the RPC was aborted.
"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def future_stream_unary(
- self, group, method, request_iterator, timeout, metadata=None,
- protocol_options=None):
- """Invokes a stream-request-unary-response method.
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future_stream_unary(self,
+ group,
+ method,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes a stream-request-unary-response method.
Args:
group: The group identifier of the RPC.
@@ -798,13 +850,17 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
response value of the RPC. In the event of RPC abortion, the returned
Future's exception value will be an AbortionError.
"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def inline_stream_stream(
- self, group, method, request_iterator, timeout, metadata=None,
- protocol_options=None):
- """Invokes a stream-request-stream-response method.
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def inline_stream_stream(self,
+ group,
+ method,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes a stream-request-stream-response method.
Args:
group: The group identifier of the RPC.
@@ -820,13 +876,19 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
values. Drawing response values from the returned iterator may raise
AbortionError indicating abortion of the RPC.
"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def event_unary_unary(
- self, group, method, request, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- """Event-driven invocation of a unary-request-unary-response method.
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_unary_unary(self,
+ group,
+ method,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Event-driven invocation of a unary-request-unary-response method.
Args:
group: The group identifier of the RPC.
@@ -843,13 +905,19 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
Returns:
A Call for the RPC.
"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def event_unary_stream(
- self, group, method, request, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- """Event-driven invocation of a unary-request-stream-response method.
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_unary_stream(self,
+ group,
+ method,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Event-driven invocation of a unary-request-stream-response method.
Args:
group: The group identifier of the RPC.
@@ -866,13 +934,18 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
Returns:
A Call for the RPC.
"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def event_stream_unary(
- self, group, method, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- """Event-driven invocation of a unary-request-unary-response method.
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_stream_unary(self,
+ group,
+ method,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Event-driven invocation of a unary-request-unary-response method.
Args:
group: The group identifier of the RPC.
@@ -889,13 +962,18 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
A pair of a Call object for the RPC and a stream.Consumer to which the
request values of the RPC should be passed.
"""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def event_stream_stream(
- self, group, method, receiver, abortion_callback, timeout,
- metadata=None, protocol_options=None):
- """Event-driven invocation of a unary-request-stream-response method.
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_stream_stream(self,
+ group,
+ method,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Event-driven invocation of a unary-request-stream-response method.
Args:
group: The group identifier of the RPC.
@@ -912,11 +990,11 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
A pair of a Call object for the RPC and a stream.Consumer to which the
request values of the RPC should be passed.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def unary_unary(self, group, method):
- """Creates a UnaryUnaryMultiCallable for a unary-unary method.
+ @abc.abstractmethod
+ def unary_unary(self, group, method):
+ """Creates a UnaryUnaryMultiCallable for a unary-unary method.
Args:
group: The group identifier of the RPC.
@@ -925,11 +1003,11 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
Returns:
A UnaryUnaryMultiCallable value for the named unary-unary method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def unary_stream(self, group, method):
- """Creates a UnaryStreamMultiCallable for a unary-stream method.
+ @abc.abstractmethod
+ def unary_stream(self, group, method):
+ """Creates a UnaryStreamMultiCallable for a unary-stream method.
Args:
group: The group identifier of the RPC.
@@ -938,11 +1016,11 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
Returns:
A UnaryStreamMultiCallable value for the named unary-stream method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def stream_unary(self, group, method):
- """Creates a StreamUnaryMultiCallable for a stream-unary method.
+ @abc.abstractmethod
+ def stream_unary(self, group, method):
+ """Creates a StreamUnaryMultiCallable for a stream-unary method.
Args:
group: The group identifier of the RPC.
@@ -951,11 +1029,11 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
Returns:
A StreamUnaryMultiCallable value for the named stream-unary method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def stream_stream(self, group, method):
- """Creates a StreamStreamMultiCallable for a stream-stream method.
+ @abc.abstractmethod
+ def stream_stream(self, group, method):
+ """Creates a StreamStreamMultiCallable for a stream-stream method.
Args:
group: The group identifier of the RPC.
@@ -964,11 +1042,11 @@ class GenericStub(six.with_metaclass(abc.ABCMeta)):
Returns:
A StreamStreamMultiCallable value for the named stream-stream method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class DynamicStub(six.with_metaclass(abc.ABCMeta)):
- """Affords RPC invocation via attributes corresponding to afforded methods.
+ """Affords RPC invocation via attributes corresponding to afforded methods.
Instances of this type may be scoped to a single group so that attribute
access is unambiguous.
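
Everything in face.py is an abstract interface, so the churn above is indentation only. For orientation, a hedged sketch of how invocation-side code typically consumes one of these interfaces (the stub object and the group/method names are illustrative; nothing here opens a real channel):

    # Illustrative only: `stub` is assumed to be some concrete GenericStub
    # provided by a Face-layer runtime; 'example.Echo'/'UnaryEcho' are made up.
    from grpc.framework.interfaces.face import face

    def call_unary(stub, request):
        try:
            # Signature mirrors GenericStub.blocking_unary_unary as declared above.
            return stub.blocking_unary_unary(
                'example.Echo', 'UnaryEcho', request, 5.0,
                metadata=None, with_call=False)
        except face.AbortionError as error:
            # Every abortion subtype carries metadata, code, and details.
            print('RPC aborted: code=%s details=%s' % (error.code, error.details))
            raise
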
diff --git a/src/python/grpcio/grpc/framework/interfaces/face/utilities.py b/src/python/grpcio/grpc/framework/interfaces/face/utilities.py
index db2ec6ed87..39a642f0ae 100644
--- a/src/python/grpcio/grpc/framework/interfaces/face/utilities.py
+++ b/src/python/grpcio/grpc/framework/interfaces/face/utilities.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Utilities for RPC Framework's Face interface."""
import collections
@@ -38,18 +37,24 @@ from grpc.framework.foundation import stream # pylint: disable=unused-import
from grpc.framework.interfaces.face import face
-class _MethodImplementation(
- face.MethodImplementation,
- collections.namedtuple(
- '_MethodImplementation',
- ['cardinality', 'style', 'unary_unary_inline', 'unary_stream_inline',
- 'stream_unary_inline', 'stream_stream_inline', 'unary_unary_event',
- 'unary_stream_event', 'stream_unary_event', 'stream_stream_event',])):
- pass
+class _MethodImplementation(face.MethodImplementation,
+ collections.namedtuple('_MethodImplementation', [
+ 'cardinality',
+ 'style',
+ 'unary_unary_inline',
+ 'unary_stream_inline',
+ 'stream_unary_inline',
+ 'stream_stream_inline',
+ 'unary_unary_event',
+ 'unary_stream_event',
+ 'stream_unary_event',
+ 'stream_stream_event',
+ ])):
+ pass
def unary_unary_inline(behavior):
- """Creates an face.MethodImplementation for the given behavior.
+ """Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a unary-unary RPC method as a callable value
@@ -59,13 +64,13 @@ def unary_unary_inline(behavior):
Returns:
An face.MethodImplementation derived from the given behavior.
"""
- return _MethodImplementation(
- cardinality.Cardinality.UNARY_UNARY, style.Service.INLINE, behavior,
- None, None, None, None, None, None, None)
+ return _MethodImplementation(cardinality.Cardinality.UNARY_UNARY,
+ style.Service.INLINE, behavior, None, None,
+ None, None, None, None, None)
def unary_stream_inline(behavior):
- """Creates an face.MethodImplementation for the given behavior.
+ """Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a unary-stream RPC method as a callable
@@ -75,13 +80,13 @@ def unary_stream_inline(behavior):
Returns:
An face.MethodImplementation derived from the given behavior.
"""
- return _MethodImplementation(
- cardinality.Cardinality.UNARY_STREAM, style.Service.INLINE, None,
- behavior, None, None, None, None, None, None)
+ return _MethodImplementation(cardinality.Cardinality.UNARY_STREAM,
+ style.Service.INLINE, None, behavior, None,
+ None, None, None, None, None)
def stream_unary_inline(behavior):
- """Creates an face.MethodImplementation for the given behavior.
+ """Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a stream-unary RPC method as a callable
@@ -91,13 +96,13 @@ def stream_unary_inline(behavior):
Returns:
An face.MethodImplementation derived from the given behavior.
"""
- return _MethodImplementation(
- cardinality.Cardinality.STREAM_UNARY, style.Service.INLINE, None, None,
- behavior, None, None, None, None, None)
+ return _MethodImplementation(cardinality.Cardinality.STREAM_UNARY,
+ style.Service.INLINE, None, None, behavior,
+ None, None, None, None, None)
def stream_stream_inline(behavior):
- """Creates an face.MethodImplementation for the given behavior.
+ """Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a stream-stream RPC method as a callable
@@ -107,13 +112,13 @@ def stream_stream_inline(behavior):
Returns:
An face.MethodImplementation derived from the given behavior.
"""
- return _MethodImplementation(
- cardinality.Cardinality.STREAM_STREAM, style.Service.INLINE, None, None,
- None, behavior, None, None, None, None)
+ return _MethodImplementation(cardinality.Cardinality.STREAM_STREAM,
+ style.Service.INLINE, None, None, None,
+ behavior, None, None, None, None)
def unary_unary_event(behavior):
- """Creates an face.MethodImplementation for the given behavior.
+ """Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a unary-unary RPC method as a callable
@@ -123,13 +128,13 @@ def unary_unary_event(behavior):
Returns:
An face.MethodImplementation derived from the given behavior.
"""
- return _MethodImplementation(
- cardinality.Cardinality.UNARY_UNARY, style.Service.EVENT, None, None,
- None, None, behavior, None, None, None)
+ return _MethodImplementation(cardinality.Cardinality.UNARY_UNARY,
+ style.Service.EVENT, None, None, None, None,
+ behavior, None, None, None)
def unary_stream_event(behavior):
- """Creates an face.MethodImplementation for the given behavior.
+ """Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a unary-stream RPC method as a callable
@@ -139,13 +144,13 @@ def unary_stream_event(behavior):
Returns:
An face.MethodImplementation derived from the given behavior.
"""
- return _MethodImplementation(
- cardinality.Cardinality.UNARY_STREAM, style.Service.EVENT, None, None,
- None, None, None, behavior, None, None)
+ return _MethodImplementation(cardinality.Cardinality.UNARY_STREAM,
+ style.Service.EVENT, None, None, None, None,
+ None, behavior, None, None)
def stream_unary_event(behavior):
- """Creates an face.MethodImplementation for the given behavior.
+ """Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a stream-unary RPC method as a callable
@@ -156,13 +161,13 @@ def stream_unary_event(behavior):
Returns:
An face.MethodImplementation derived from the given behavior.
"""
- return _MethodImplementation(
- cardinality.Cardinality.STREAM_UNARY, style.Service.EVENT, None, None,
- None, None, None, None, behavior, None)
+ return _MethodImplementation(cardinality.Cardinality.STREAM_UNARY,
+ style.Service.EVENT, None, None, None, None,
+ None, None, behavior, None)
def stream_stream_event(behavior):
- """Creates an face.MethodImplementation for the given behavior.
+ """Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a stream-stream RPC method as a callable
@@ -173,6 +178,6 @@ def stream_stream_event(behavior):
Returns:
An face.MethodImplementation derived from the given behavior.
"""
- return _MethodImplementation(
- cardinality.Cardinality.STREAM_STREAM, style.Service.EVENT, None, None,
- None, None, None, None, None, behavior)
+ return _MethodImplementation(cardinality.Cardinality.STREAM_STREAM,
+ style.Service.EVENT, None, None, None, None,
+ None, None, None, behavior)
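
Each helper above fills exactly one slot of the _MethodImplementation namedtuple and leaves the rest None. A hedged sketch of how a service definition might use the inline unary-unary variant (the echo behavior is illustrative):

    from grpc.framework.common import cardinality
    from grpc.framework.interfaces.face import utilities

    def echo(request, context):
        # INLINE unary-unary behaviors take (request, ServicerContext)
        # and return the response value directly.
        return request

    method = utilities.unary_unary_inline(echo)
    assert method.cardinality is cardinality.Cardinality.UNARY_UNARY
    assert method.unary_unary_inline is echo  # all other slots remain None
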
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index f58dd8ea40..470576be51 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -159,6 +159,8 @@ CORE_SOURCE_FILES = [
'src/core/lib/slice/percent_encoding.c',
'src/core/lib/slice/slice.c',
'src/core/lib/slice/slice_buffer.c',
+ 'src/core/lib/slice/slice_hash_table.c',
+ 'src/core/lib/slice/slice_intern.c',
'src/core/lib/slice/slice_string_helpers.c',
'src/core/lib/surface/alarm.c',
'src/core/lib/surface/api_trace.c',
@@ -181,12 +183,13 @@ CORE_SOURCE_FILES = [
'src/core/lib/transport/bdp_estimator.c',
'src/core/lib/transport/byte_stream.c',
'src/core/lib/transport/connectivity_state.c',
- 'src/core/lib/transport/mdstr_hash_table.c',
+ 'src/core/lib/transport/error_utils.c',
'src/core/lib/transport/metadata.c',
'src/core/lib/transport/metadata_batch.c',
'src/core/lib/transport/pid_controller.c',
'src/core/lib/transport/service_config.c',
'src/core/lib/transport/static_metadata.c',
+ 'src/core/lib/transport/status_conversion.c',
'src/core/lib/transport/timeout_encoding.c',
'src/core/lib/transport/transport.c',
'src/core/lib/transport/transport_op_string.c',
@@ -207,7 +210,6 @@ CORE_SOURCE_FILES = [
'src/core/ext/transport/chttp2/transport/huffsyms.c',
'src/core/ext/transport/chttp2/transport/incoming_metadata.c',
'src/core/ext/transport/chttp2/transport/parsing.c',
- 'src/core/ext/transport/chttp2/transport/status_conversion.c',
'src/core/ext/transport/chttp2/transport/stream_lists.c',
'src/core/ext/transport/chttp2/transport/stream_map.c',
'src/core/ext/transport/chttp2/transport/varint.c',
diff --git a/src/python/grpcio/support.py b/src/python/grpcio/support.py
index b226e690fd..a228ba4a48 100644
--- a/src/python/grpcio/support.py
+++ b/src/python/grpcio/support.py
@@ -27,7 +27,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
import os
import os.path
import shutil
@@ -38,7 +37,6 @@ from distutils import errors
import commands
-
C_PYTHON_DEV = """
#include <Python.h>
int main(int argc, char **argv) { return 0; }
@@ -55,69 +53,70 @@ Could not find <Python.h>. This could mean the following:
(check your environment variables or try re-installing?)
"""
-C_CHECKS = {
- C_PYTHON_DEV: C_PYTHON_DEV_ERROR_MESSAGE,
-}
+C_CHECKS = {C_PYTHON_DEV: C_PYTHON_DEV_ERROR_MESSAGE,}
+
def _compile(compiler, source_string):
- tempdir = tempfile.mkdtemp()
- cpath = os.path.join(tempdir, 'a.c')
- with open(cpath, 'w') as cfile:
- cfile.write(source_string)
- try:
- compiler.compile([cpath])
- except errors.CompileError as error:
- return error
- finally:
- shutil.rmtree(tempdir)
+ tempdir = tempfile.mkdtemp()
+ cpath = os.path.join(tempdir, 'a.c')
+ with open(cpath, 'w') as cfile:
+ cfile.write(source_string)
+ try:
+ compiler.compile([cpath])
+ except errors.CompileError as error:
+ return error
+ finally:
+ shutil.rmtree(tempdir)
+
def _expect_compile(compiler, source_string, error_message):
- if _compile(compiler, source_string) is not None:
- sys.stderr.write(error_message)
- raise commands.CommandError(
- "Diagnostics found a compilation environment issue:\n{}"
+ if _compile(compiler, source_string) is not None:
+ sys.stderr.write(error_message)
+ raise commands.CommandError(
+ "Diagnostics found a compilation environment issue:\n{}"
.format(error_message))
+
def diagnose_compile_error(build_ext, error):
- """Attempt to diagnose an error during compilation."""
- for c_check, message in C_CHECKS.items():
- _expect_compile(build_ext.compiler, c_check, message)
- python_sources = [
- source for source in build_ext.get_source_files()
- if source.startswith('./src/python') and source.endswith('c')
- ]
- for source in python_sources:
- if not os.path.isfile(source):
- raise commands.CommandError(
- ("Diagnostics found a missing Python extension source file:\n{}\n\n"
- "This is usually because the Cython sources haven't been transpiled "
- "into C yet and you're building from source.\n"
- "Try setting the environment variable "
- "`GRPC_PYTHON_BUILD_WITH_CYTHON=1` when invoking `setup.py` or "
- "when using `pip`, e.g.:\n\n"
- "pip install -rrequirements.txt\n"
- "GRPC_PYTHON_BUILD_WITH_CYTHON=1 pip install .")
- .format(source)
- )
+ """Attempt to diagnose an error during compilation."""
+ for c_check, message in C_CHECKS.items():
+ _expect_compile(build_ext.compiler, c_check, message)
+ python_sources = [
+ source for source in build_ext.get_source_files()
+ if source.startswith('./src/python') and source.endswith('c')
+ ]
+ for source in python_sources:
+ if not os.path.isfile(source):
+ raise commands.CommandError((
+ "Diagnostics found a missing Python extension source file:\n{}\n\n"
+ "This is usually because the Cython sources haven't been transpiled "
+ "into C yet and you're building from source.\n"
+ "Try setting the environment variable "
+ "`GRPC_PYTHON_BUILD_WITH_CYTHON=1` when invoking `setup.py` or "
+ "when using `pip`, e.g.:\n\n"
+ "pip install -rrequirements.txt\n"
+ "GRPC_PYTHON_BUILD_WITH_CYTHON=1 pip install .").format(source))
+
def diagnose_attribute_error(build_ext, error):
- if any('_needs_stub' in arg for arg in error.args):
- raise commands.CommandError(
- "We expect a missing `_needs_stub` attribute from older versions of "
- "setuptools. Consider upgrading setuptools.")
+ if any('_needs_stub' in arg for arg in error.args):
+ raise commands.CommandError(
+ "We expect a missing `_needs_stub` attribute from older versions of "
+ "setuptools. Consider upgrading setuptools.")
+
_ERROR_DIAGNOSES = {
errors.CompileError: diagnose_compile_error,
AttributeError: diagnose_attribute_error
}
-def diagnose_build_ext_error(build_ext, error, formatted):
- diagnostic = _ERROR_DIAGNOSES.get(type(error))
- if diagnostic is None:
- raise commands.CommandError(
- "\n\nWe could not diagnose your build failure. Please file an issue at "
- "http://www.github.com/grpc/grpc with `[Python install]` in the title."
- "\n\n{}".format(formatted))
- else:
- diagnostic(build_ext, error)
+def diagnose_build_ext_error(build_ext, error, formatted):
+ diagnostic = _ERROR_DIAGNOSES.get(type(error))
+ if diagnostic is None:
+ raise commands.CommandError(
+ "\n\nWe could not diagnose your build failure. Please file an issue at "
+ "http://www.github.com/grpc/grpc with `[Python install]` in the title."
+ "\n\n{}".format(formatted))
+ else:
+ diagnostic(build_ext, error)
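
The reformatted module keeps the same control flow: C_CHECKS compiles tiny probe programs, and diagnose_build_ext_error dispatches on the exception type, re-raising with a pointer to the issue tracker when nothing matches. A generic, self-contained sketch of that type-keyed dispatch pattern (names here are illustrative, not the grpcio ones):

    class DiagnosedError(Exception):
        """Raised when a build failure maps to a known cause."""

    def _diagnose_compile(error):
        raise DiagnosedError('compiler environment problem: %s' % (error,))

    def _diagnose_attribute(error):
        raise DiagnosedError('likely an outdated setuptools: %s' % (error,))

    _DIAGNOSES = {OSError: _diagnose_compile, AttributeError: _diagnose_attribute}

    def diagnose(error, formatted_traceback):
        handler = _DIAGNOSES.get(type(error))
        if handler is None:
            # No known cause: surface the full traceback to the user.
            raise DiagnosedError('undiagnosed failure:\n' + formatted_traceback)
        handler(error)
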
diff --git a/src/python/grpcio_health_checking/grpc_health/__init__.py b/src/python/grpcio_health_checking/grpc_health/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_health_checking/grpc_health/__init__.py
+++ b/src/python/grpcio_health_checking/grpc_health/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_health_checking/grpc_health/v1/__init__.py b/src/python/grpcio_health_checking/grpc_health/v1/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_health_checking/grpc_health/v1/__init__.py
+++ b/src/python/grpcio_health_checking/grpc_health/v1/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_health_checking/grpc_health/v1/health.py b/src/python/grpcio_health_checking/grpc_health/v1/health.py
index 0df679b0e2..f0f11cf84b 100644
--- a/src/python/grpcio_health_checking/grpc_health/v1/health.py
+++ b/src/python/grpcio_health_checking/grpc_health/v1/health.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Reference implementation for health checking in gRPC Python."""
import threading
@@ -37,23 +36,23 @@ from grpc_health.v1 import health_pb2
class HealthServicer(health_pb2.HealthServicer):
- """Servicer handling RPCs for service statuses."""
+ """Servicer handling RPCs for service statuses."""
- def __init__(self):
- self._server_status_lock = threading.Lock()
- self._server_status = {}
+ def __init__(self):
+ self._server_status_lock = threading.Lock()
+ self._server_status = {}
- def Check(self, request, context):
- with self._server_status_lock:
- status = self._server_status.get(request.service)
- if status is None:
- context.set_code(grpc.StatusCode.NOT_FOUND)
- return health_pb2.HealthCheckResponse()
- else:
- return health_pb2.HealthCheckResponse(status=status)
+ def Check(self, request, context):
+ with self._server_status_lock:
+ status = self._server_status.get(request.service)
+ if status is None:
+ context.set_code(grpc.StatusCode.NOT_FOUND)
+ return health_pb2.HealthCheckResponse()
+ else:
+ return health_pb2.HealthCheckResponse(status=status)
- def set(self, service, status):
- """Sets the status of a service.
+ def set(self, service, status):
+ """Sets the status of a service.
Args:
service: string, the name of the service.
@@ -61,5 +60,5 @@ class HealthServicer(health_pb2.HealthServicer):
status: HealthCheckResponse.status enum value indicating
the status of the service
"""
- with self._server_status_lock:
- self._server_status[service] = status
+ with self._server_status_lock:
+ self._server_status[service] = status
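
Behavior is unchanged by the re-indentation: set() records a status under the lock and Check() answers NOT_FOUND for unknown services. A hedged sketch of typical wiring (the add-servicer helper name is assumed from the health_pb2 module generated in this era; later releases generate it into health_pb2_grpc instead):

    from concurrent import futures
    import grpc
    from grpc_health.v1 import health, health_pb2

    servicer = health.HealthServicer()
    servicer.set('', health_pb2.HealthCheckResponse.SERVING)  # '' = overall server health

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    # Assumption: registration helper generated into health_pb2 at this point in time.
    health_pb2.add_HealthServicer_to_server(servicer, server)
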
diff --git a/src/python/grpcio_health_checking/health_commands.py b/src/python/grpcio_health_checking/health_commands.py
index 0c420a655f..14375a74c0 100644
--- a/src/python/grpcio_health_checking/health_commands.py
+++ b/src/python/grpcio_health_checking/health_commands.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Provides distutils command classes for the GRPC Python setup process."""
import os
@@ -39,40 +38,40 @@ HEALTH_PROTO = os.path.join(ROOT_DIR, '../../proto/grpc/health/v1/health.proto')
class CopyProtoModules(setuptools.Command):
- """Command to copy proto modules from grpc/src/proto."""
+ """Command to copy proto modules from grpc/src/proto."""
- description = ''
- user_options = []
+ description = ''
+ user_options = []
- def initialize_options(self):
- pass
+ def initialize_options(self):
+ pass
- def finalize_options(self):
- pass
+ def finalize_options(self):
+ pass
- def run(self):
- if os.path.isfile(HEALTH_PROTO):
- shutil.copyfile(
- HEALTH_PROTO,
- os.path.join(ROOT_DIR, 'grpc_health/v1/health.proto'))
+ def run(self):
+ if os.path.isfile(HEALTH_PROTO):
+ shutil.copyfile(
+ HEALTH_PROTO,
+ os.path.join(ROOT_DIR, 'grpc_health/v1/health.proto'))
class BuildPackageProtos(setuptools.Command):
- """Command to generate project *_pb2.py modules from proto files."""
+ """Command to generate project *_pb2.py modules from proto files."""
- description = 'build grpc protobuf modules'
- user_options = []
+ description = 'build grpc protobuf modules'
+ user_options = []
- def initialize_options(self):
- pass
+ def initialize_options(self):
+ pass
- def finalize_options(self):
- pass
+ def finalize_options(self):
+ pass
- def run(self):
- # due to limitations of the proto generator, we require that only *one*
- # directory is provided as an 'include' directory. We assume it's the '' key
- # to `self.distribution.package_dir` (and get a key error if it's not
- # there).
- from grpc_tools import command
- command.build_package_protos(self.distribution.package_dir[''])
+ def run(self):
+ # due to limitations of the proto generator, we require that only *one*
+ # directory is provided as an 'include' directory. We assume it's the '' key
+ # to `self.distribution.package_dir` (and get a key error if it's not
+ # there).
+ from grpc_tools import command
+ command.build_package_protos(self.distribution.package_dir[''])
diff --git a/src/python/grpcio_health_checking/setup.py b/src/python/grpcio_health_checking/setup.py
index e88f389ba8..4c3991dcc4 100644
--- a/src/python/grpcio_health_checking/setup.py
+++ b/src/python/grpcio_health_checking/setup.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Setup module for the GRPC Python package's optional health checking."""
import os
@@ -41,18 +40,14 @@ os.chdir(os.path.dirname(os.path.abspath(__file__)))
import health_commands
import grpc_version
-PACKAGE_DIRECTORIES = {
- '': '.',
-}
+PACKAGE_DIRECTORIES = {'': '.',}
SETUP_REQUIRES = (
- 'grpcio-tools>={version}'.format(version=grpc_version.VERSION),
-)
+ 'grpcio-tools>={version}'.format(version=grpc_version.VERSION),)
INSTALL_REQUIRES = (
'protobuf>=3.0.0',
- 'grpcio>={version}'.format(version=grpc_version.VERSION),
-)
+ 'grpcio>={version}'.format(version=grpc_version.VERSION),)
COMMAND_CLASS = {
# Run preprocess from the repository *before* doing any packaging!
@@ -68,5 +63,4 @@ setuptools.setup(
packages=setuptools.find_packages('.'),
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
- cmdclass=COMMAND_CLASS
-)
+ cmdclass=COMMAND_CLASS)
diff --git a/src/python/grpcio_reflection/grpc_reflection/__init__.py b/src/python/grpcio_reflection/grpc_reflection/__init__.py
index d5ad73a74a..100a624dc9 100644
--- a/src/python/grpcio_reflection/grpc_reflection/__init__.py
+++ b/src/python/grpcio_reflection/grpc_reflection/__init__.py
@@ -26,4 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/src/python/grpcio_reflection/grpc_reflection/v1alpha/__init__.py b/src/python/grpcio_reflection/grpc_reflection/v1alpha/__init__.py
index d5ad73a74a..100a624dc9 100644
--- a/src/python/grpcio_reflection/grpc_reflection/v1alpha/__init__.py
+++ b/src/python/grpcio_reflection/grpc_reflection/v1alpha/__init__.py
@@ -26,4 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py b/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py
index bfcbce8e04..87f28396ce 100644
--- a/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py
+++ b/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Reference implementation for reflection in gRPC Python."""
import threading
@@ -39,105 +38,96 @@ from grpc_reflection.v1alpha import reflection_pb2
_POOL = descriptor_pool.Default()
+
def _not_found_error():
- return reflection_pb2.ServerReflectionResponse(
- error_response=reflection_pb2.ErrorResponse(
- error_code=grpc.StatusCode.NOT_FOUND.value[0],
- error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
- )
- )
+ return reflection_pb2.ServerReflectionResponse(
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.NOT_FOUND.value[0],
+ error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),))
+
def _file_descriptor_response(descriptor):
- proto = descriptor_pb2.FileDescriptorProto()
- descriptor.CopyToProto(proto)
- serialized_proto = proto.SerializeToString()
- return reflection_pb2.ServerReflectionResponse(
- file_descriptor_response=reflection_pb2.FileDescriptorResponse(
- file_descriptor_proto=(serialized_proto,)
- ),
- )
+ proto = descriptor_pb2.FileDescriptorProto()
+ descriptor.CopyToProto(proto)
+ serialized_proto = proto.SerializeToString()
+ return reflection_pb2.ServerReflectionResponse(
+ file_descriptor_response=reflection_pb2.FileDescriptorResponse(
+ file_descriptor_proto=(serialized_proto,)),)
class ReflectionServicer(reflection_pb2.ServerReflectionServicer):
- """Servicer handling RPCs for service statuses."""
+ """Servicer handling RPCs for service statuses."""
- def __init__(self, service_names, pool=None):
- """Constructor.
+ def __init__(self, service_names, pool=None):
+ """Constructor.
Args:
service_names: Iterable of fully-qualified service names available.
"""
- self._service_names = list(service_names)
- self._pool = _POOL if pool is None else pool
-
- def _file_by_filename(self, filename):
- try:
- descriptor = self._pool.FindFileByName(filename)
- except KeyError:
- return _not_found_error()
- else:
- return _file_descriptor_response(descriptor)
-
- def _file_containing_symbol(self, fully_qualified_name):
- try:
- descriptor = self._pool.FindFileContainingSymbol(fully_qualified_name)
- except KeyError:
- return _not_found_error()
- else:
- return _file_descriptor_response(descriptor)
-
- def _file_containing_extension(containing_type, extension_number):
- # TODO(atash) Python protobuf currently doesn't support querying extensions.
- # https://github.com/google/protobuf/issues/2248
- return reflection_pb2.ServerReflectionResponse(
- error_response=reflection_pb2.ErrorResponse(
- error_code=grpc.StatusCode.UNIMPLEMENTED.value[0],
- error_message=grpc.StatusCode.UNIMPLMENTED.value[1].encode(),
- )
- )
-
- def _extension_numbers_of_type(fully_qualified_name):
- # TODO(atash) We're allowed to leave this unsupported according to the
- # protocol, but we should still eventually implement it. Hits the same issue
- # as `_file_containing_extension`, however.
- # https://github.com/google/protobuf/issues/2248
- return reflection_pb2.ServerReflectionResponse(
- error_response=reflection_pb2.ErrorResponse(
- error_code=grpc.StatusCode.UNIMPLEMENTED.value[0],
- error_message=grpc.StatusCode.UNIMPLMENTED.value[1].encode(),
- )
- )
+ self._service_names = list(service_names)
+ self._pool = _POOL if pool is None else pool
+
+ def _file_by_filename(self, filename):
+ try:
+ descriptor = self._pool.FindFileByName(filename)
+ except KeyError:
+ return _not_found_error()
+ else:
+ return _file_descriptor_response(descriptor)
+
+ def _file_containing_symbol(self, fully_qualified_name):
+ try:
+ descriptor = self._pool.FindFileContainingSymbol(
+ fully_qualified_name)
+ except KeyError:
+ return _not_found_error()
+ else:
+ return _file_descriptor_response(descriptor)
+
+ def _file_containing_extension(self, containing_type, extension_number):
+ # TODO(atash) Python protobuf currently doesn't support querying extensions.
+ # https://github.com/google/protobuf/issues/2248
+ return reflection_pb2.ServerReflectionResponse(
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.UNIMPLEMENTED.value[0],
+ error_message=grpc.StatusCode.UNIMPLEMENTED.value[1].encode(),))
+
+ def _extension_numbers_of_type(self, fully_qualified_name):
+ # TODO(atash) We're allowed to leave this unsupported according to the
+ # protocol, but we should still eventually implement it. Hits the same issue
+ # as `_file_containing_extension`, however.
+ # https://github.com/google/protobuf/issues/2248
+ return reflection_pb2.ServerReflectionResponse(
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.UNIMPLEMENTED.value[0],
+ error_message=grpc.StatusCode.UNIMPLEMENTED.value[1].encode(),))
- def _list_services(self):
- return reflection_pb2.ServerReflectionResponse(
- list_services_response=reflection_pb2.ListServiceResponse(
- service=[
+ def _list_services(self):
+ return reflection_pb2.ServerReflectionResponse(
+ list_services_response=reflection_pb2.ListServiceResponse(service=[
reflection_pb2.ServiceResponse(name=service_name)
for service_name in self._service_names
- ]
- )
- )
-
- def ServerReflectionInfo(self, request_iterator, context):
- for request in request_iterator:
- if request.HasField('file_by_filename'):
- yield self._file_by_filename(request.file_by_filename)
- elif request.HasField('file_containing_symbol'):
- yield self._file_containing_symbol(request.file_containing_symbol)
- elif request.HasField('file_containing_extension'):
- yield self._file_containing_extension(
- request.file_containing_extension.containing_type,
- request.file_containing_extension.extension_number)
- elif request.HasField('all_extension_numbers_of_type'):
- yield _all_extension_numbers_of_type(
- request.all_extension_numbers_of_type)
- elif request.HasField('list_services'):
- yield self._list_services()
- else:
- yield reflection_pb2.ServerReflectionResponse(
- error_response=reflection_pb2.ErrorResponse(
- error_code=grpc.StatusCode.INVALID_ARGUMENT.value[0],
- error_message=grpc.StatusCode.INVALID_ARGUMENT.value[1].encode(),
- )
- )
-
+ ]))
+
+ def ServerReflectionInfo(self, request_iterator, context):
+ for request in request_iterator:
+ if request.HasField('file_by_filename'):
+ yield self._file_by_filename(request.file_by_filename)
+ elif request.HasField('file_containing_symbol'):
+ yield self._file_containing_symbol(
+ request.file_containing_symbol)
+ elif request.HasField('file_containing_extension'):
+ yield self._file_containing_extension(
+ request.file_containing_extension.containing_type,
+ request.file_containing_extension.extension_number)
+ elif request.HasField('all_extension_numbers_of_type'):
+ yield _all_extension_numbers_of_type(
+ request.all_extension_numbers_of_type)
+ elif request.HasField('list_services'):
+ yield self._list_services()
+ else:
+ yield reflection_pb2.ServerReflectionResponse(
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.INVALID_ARGUMENT.value[0],
+ error_message=grpc.StatusCode.INVALID_ARGUMENT.value[1]
+ .encode(),))
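The servicer above can be exercised directly, without standing up a server, by feeding a single request into `ServerReflectionInfo`. A minimal sketch follows; the module paths, the `ReflectionServicer` class name, and the `helloworld.Greeter` service name are assumptions for illustration, not taken from this change:

```python
# Sketch only: drive the reflection servicer's dispatch loop by hand.
# Import paths, class name, and the service name are assumed for this example.
from grpc_reflection.v1alpha import reflection, reflection_pb2

servicer = reflection.ReflectionServicer(service_names=['helloworld.Greeter'])
request = reflection_pb2.ServerReflectionRequest(list_services='')
for response in servicer.ServerReflectionInfo(iter([request]), None):
    for service in response.list_services_response.service:
        print(service.name)  # -> helloworld.Greeter
```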
diff --git a/src/python/grpcio_reflection/reflection_commands.py b/src/python/grpcio_reflection/reflection_commands.py
index dee5491e0a..62237e0971 100644
--- a/src/python/grpcio_reflection/reflection_commands.py
+++ b/src/python/grpcio_reflection/reflection_commands.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Provides distutils command classes for the GRPC Python setup process."""
import os
@@ -35,44 +34,46 @@ import shutil
import setuptools
ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
-HEALTH_PROTO = os.path.join(ROOT_DIR, '../../proto/grpc/reflection/v1alpha/reflection.proto')
+HEALTH_PROTO = os.path.join(
+ ROOT_DIR, '../../proto/grpc/reflection/v1alpha/reflection.proto')
class CopyProtoModules(setuptools.Command):
- """Command to copy proto modules from grpc/src/proto."""
+ """Command to copy proto modules from grpc/src/proto."""
- description = ''
- user_options = []
+ description = ''
+ user_options = []
- def initialize_options(self):
- pass
+ def initialize_options(self):
+ pass
- def finalize_options(self):
- pass
+ def finalize_options(self):
+ pass
- def run(self):
- if os.path.isfile(HEALTH_PROTO):
- shutil.copyfile(
- HEALTH_PROTO,
- os.path.join(ROOT_DIR, 'grpc_reflection/v1alpha/reflection.proto'))
+ def run(self):
+ if os.path.isfile(HEALTH_PROTO):
+ shutil.copyfile(
+ HEALTH_PROTO,
+ os.path.join(ROOT_DIR,
+ 'grpc_reflection/v1alpha/reflection.proto'))
class BuildPackageProtos(setuptools.Command):
- """Command to generate project *_pb2.py modules from proto files."""
+ """Command to generate project *_pb2.py modules from proto files."""
- description = 'build grpc protobuf modules'
- user_options = []
+ description = 'build grpc protobuf modules'
+ user_options = []
- def initialize_options(self):
- pass
+ def initialize_options(self):
+ pass
- def finalize_options(self):
- pass
+ def finalize_options(self):
+ pass
- def run(self):
- # due to limitations of the proto generator, we require that only *one*
- # directory is provided as an 'include' directory. We assume it's the '' key
- # to `self.distribution.package_dir` (and get a key error if it's not
- # there).
- from grpc_tools import command
- command.build_package_protos(self.distribution.package_dir[''])
+ def run(self):
+ # due to limitations of the proto generator, we require that only *one*
+ # directory is provided as an 'include' directory. We assume it's the '' key
+ # to `self.distribution.package_dir` (and get a key error if it's not
+ # there).
+ from grpc_tools import command
+ command.build_package_protos(self.distribution.package_dir[''])
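For context, these distutils commands are meant to be wired into a setuptools `cmdclass` mapping; a small sketch with illustrative key names (the real mapping lives in the package's own setup.py):

```python
# Illustrative only: registering the commands above with setuptools.
# The command names used as keys here are assumptions for this sketch.
import setuptools

import reflection_commands

setuptools.setup(
    name='example-reflection-package',
    package_dir={'': '.'},
    packages=setuptools.find_packages('.'),
    cmdclass={
        'preprocess': reflection_commands.CopyProtoModules,
        'build_package_protos': reflection_commands.BuildPackageProtos,
    },
)
```

Once registered, `python setup.py preprocess build_package_protos` copies the proto file and regenerates the `*_pb2.py` modules.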
diff --git a/src/python/grpcio_reflection/setup.py b/src/python/grpcio_reflection/setup.py
index cfc41f4fe7..2926923029 100644
--- a/src/python/grpcio_reflection/setup.py
+++ b/src/python/grpcio_reflection/setup.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Setup module for the GRPC Python package's optional reflection."""
import os
@@ -41,18 +40,14 @@ os.chdir(os.path.dirname(os.path.abspath(__file__)))
import reflection_commands
import grpc_version
-PACKAGE_DIRECTORIES = {
- '': '.',
-}
+PACKAGE_DIRECTORIES = {'': '.',}
SETUP_REQUIRES = (
- 'grpcio-tools>={version}'.format(version=grpc_version.VERSION),
-)
+ 'grpcio-tools>={version}'.format(version=grpc_version.VERSION),)
INSTALL_REQUIRES = (
'protobuf>=3.0.0',
- 'grpcio>={version}'.format(version=grpc_version.VERSION),
-)
+ 'grpcio>={version}'.format(version=grpc_version.VERSION),)
COMMAND_CLASS = {
# Run preprocess from the repository *before* doing any packaging!
@@ -68,5 +63,4 @@ setuptools.setup(
packages=setuptools.find_packages('.'),
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
- cmdclass=COMMAND_CLASS
-)
+ cmdclass=COMMAND_CLASS)
diff --git a/src/python/grpcio_tests/commands.py b/src/python/grpcio_tests/commands.py
index e822971fe0..845b7f598c 100644
--- a/src/python/grpcio_tests/commands.py
+++ b/src/python/grpcio_tests/commands.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Provides distutils command classes for the gRPC Python setup process."""
import distutils
@@ -55,163 +54,162 @@ PYTHON_PROTO_TOP_LEVEL = os.path.join(PYTHON_STEM, 'src')
class CommandError(object):
- pass
+ pass
class GatherProto(setuptools.Command):
- description = 'gather proto dependencies'
- user_options = []
+ description = 'gather proto dependencies'
+ user_options = []
- def initialize_options(self):
- pass
+ def initialize_options(self):
+ pass
- def finalize_options(self):
- pass
+ def finalize_options(self):
+ pass
- def run(self):
- # TODO(atash) ensure that we're running from the repository directory when
- # this command is used
- try:
- shutil.rmtree(PROTO_STEM)
- except Exception as error:
- # We don't care if this command fails
- pass
- shutil.copytree(GRPC_PROTO_STEM, PROTO_STEM)
- for root, _, _ in os.walk(PYTHON_PROTO_TOP_LEVEL):
- path = os.path.join(root, '__init__.py')
- open(path, 'a').close()
+ def run(self):
+ # TODO(atash) ensure that we're running from the repository directory when
+ # this command is used
+ try:
+ shutil.rmtree(PROTO_STEM)
+ except Exception as error:
+ # We don't care if this command fails
+ pass
+ shutil.copytree(GRPC_PROTO_STEM, PROTO_STEM)
+ for root, _, _ in os.walk(PYTHON_PROTO_TOP_LEVEL):
+ path = os.path.join(root, '__init__.py')
+ open(path, 'a').close()
class BuildProtoModules(setuptools.Command):
- """Command to generate project *_pb2.py modules from proto files."""
-
- description = 'build protobuf modules'
- user_options = [
- ('include=', None, 'path patterns to include in protobuf generation'),
- ('exclude=', None, 'path patterns to exclude from protobuf generation')
- ]
-
- def initialize_options(self):
- self.exclude = None
- self.include = r'.*\.proto$'
-
- def finalize_options(self):
- pass
-
- def run(self):
- import grpc_tools.protoc as protoc
-
- include_regex = re.compile(self.include)
- exclude_regex = re.compile(self.exclude) if self.exclude else None
- paths = []
- for walk_root, directories, filenames in os.walk(PROTO_STEM):
- for filename in filenames:
- path = os.path.join(walk_root, filename)
- if include_regex.match(path) and not (
- exclude_regex and exclude_regex.match(path)):
- paths.append(path)
-
- # TODO(kpayson): It would be nice to do this in a batch command,
- # but we currently have name conflicts in src/proto
- for path in paths:
- command = [
- 'grpc_tools.protoc',
- '-I {}'.format(PROTO_STEM),
- '--python_out={}'.format(PROTO_STEM),
- '--grpc_python_out={}'.format(PROTO_STEM),
- ] + [path]
- if protoc.main(command) != 0:
- sys.stderr.write(
- 'warning: Command:\n{}\nFailed'.format(
- command))
-
- # Generated proto directories dont include __init__.py, but
- # these are needed for python package resolution
- for walk_root, _, _ in os.walk(PROTO_STEM):
- path = os.path.join(walk_root, '__init__.py')
- open(path, 'a').close()
+ """Command to generate project *_pb2.py modules from proto files."""
+
+ description = 'build protobuf modules'
+ user_options = [
+ ('include=', None, 'path patterns to include in protobuf generation'),
+ ('exclude=', None, 'path patterns to exclude from protobuf generation')
+ ]
+
+ def initialize_options(self):
+ self.exclude = None
+ self.include = r'.*\.proto$'
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ import grpc_tools.protoc as protoc
+
+ include_regex = re.compile(self.include)
+ exclude_regex = re.compile(self.exclude) if self.exclude else None
+ paths = []
+ for walk_root, directories, filenames in os.walk(PROTO_STEM):
+ for filename in filenames:
+ path = os.path.join(walk_root, filename)
+ if include_regex.match(path) and not (
+ exclude_regex and exclude_regex.match(path)):
+ paths.append(path)
+
+ # TODO(kpayson): It would be nice to do this in a batch command,
+ # but we currently have name conflicts in src/proto
+ for path in paths:
+ command = [
+ 'grpc_tools.protoc',
+ '-I {}'.format(PROTO_STEM),
+ '--python_out={}'.format(PROTO_STEM),
+ '--grpc_python_out={}'.format(PROTO_STEM),
+ ] + [path]
+ if protoc.main(command) != 0:
+ sys.stderr.write('warning: Command:\n{}\nFailed'.format(
+ command))
+
+        # Generated proto directories don't include __init__.py, but
+ # these are needed for python package resolution
+ for walk_root, _, _ in os.walk(PROTO_STEM):
+ path = os.path.join(walk_root, '__init__.py')
+ open(path, 'a').close()
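The per-file generation above reduces to one `grpc_tools.protoc` invocation per proto; a standalone sketch of that call, using a placeholder proto path:

```python
# Sketch of the protoc invocation pattern used in run() above.
# 'protos/helloworld.proto' is a placeholder, not a file in this repository.
import grpc_tools.protoc as protoc

exit_code = protoc.main([
    'grpc_tools.protoc',
    '--proto_path=protos',
    '--python_out=protos',
    '--grpc_python_out=protos',
    'protos/helloworld.proto',
])
print('protoc exit code:', exit_code)
```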
class BuildPy(build_py.build_py):
- """Custom project build command."""
+ """Custom project build command."""
- def run(self):
- try:
- self.run_command('build_package_protos')
- except CommandError as error:
- sys.stderr.write('warning: %s\n' % error.message)
- build_py.build_py.run(self)
+ def run(self):
+ try:
+ self.run_command('build_package_protos')
+ except CommandError as error:
+ sys.stderr.write('warning: %s\n' % error.message)
+ build_py.build_py.run(self)
class TestLite(setuptools.Command):
- """Command to run tests without fetching or building anything."""
+ """Command to run tests without fetching or building anything."""
- description = 'run tests without fetching or building anything.'
- user_options = []
+ description = 'run tests without fetching or building anything.'
+ user_options = []
- def initialize_options(self):
- pass
+ def initialize_options(self):
+ pass
- def finalize_options(self):
- # distutils requires this override.
- pass
+ def finalize_options(self):
+ # distutils requires this override.
+ pass
- def run(self):
- self._add_eggs_to_path()
+ def run(self):
+ self._add_eggs_to_path()
- import tests
- loader = tests.Loader()
- loader.loadTestsFromNames(['tests'])
- runner = tests.Runner()
- result = runner.run(loader.suite)
- if not result.wasSuccessful():
- sys.exit('Test failure')
+ import tests
+ loader = tests.Loader()
+ loader.loadTestsFromNames(['tests'])
+ runner = tests.Runner()
+ result = runner.run(loader.suite)
+ if not result.wasSuccessful():
+ sys.exit('Test failure')
- def _add_eggs_to_path(self):
- """Fetch install and test requirements"""
- self.distribution.fetch_build_eggs(self.distribution.install_requires)
- self.distribution.fetch_build_eggs(self.distribution.tests_require)
+ def _add_eggs_to_path(self):
+ """Fetch install and test requirements"""
+ self.distribution.fetch_build_eggs(self.distribution.install_requires)
+ self.distribution.fetch_build_eggs(self.distribution.tests_require)
class RunInterop(test.test):
- description = 'run interop test client/server'
- user_options = [
- ('args=', 'a', 'pass-thru arguments for the client/server'),
- ('client', 'c', 'flag indicating to run the client'),
- ('server', 's', 'flag indicating to run the server')
- ]
-
- def initialize_options(self):
- self.args = ''
- self.client = False
- self.server = False
-
- def finalize_options(self):
- if self.client and self.server:
- raise DistutilsOptionError('you may only specify one of client or server')
-
- def run(self):
- if self.distribution.install_requires:
- self.distribution.fetch_build_eggs(self.distribution.install_requires)
- if self.distribution.tests_require:
- self.distribution.fetch_build_eggs(self.distribution.tests_require)
- if self.client:
- self.run_client()
- elif self.server:
- self.run_server()
-
- def run_server(self):
- # We import here to ensure that our setuptools parent has had a chance to
- # edit the Python system path.
- from tests.interop import server
- sys.argv[1:] = self.args.split()
- server.serve()
-
- def run_client(self):
- # We import here to ensure that our setuptools parent has had a chance to
- # edit the Python system path.
- from tests.interop import client
- sys.argv[1:] = self.args.split()
- client.test_interoperability()
+ description = 'run interop test client/server'
+ user_options = [('args=', 'a', 'pass-thru arguments for the client/server'),
+ ('client', 'c', 'flag indicating to run the client'),
+ ('server', 's', 'flag indicating to run the server')]
+
+ def initialize_options(self):
+ self.args = ''
+ self.client = False
+ self.server = False
+
+ def finalize_options(self):
+ if self.client and self.server:
+ raise DistutilsOptionError(
+ 'you may only specify one of client or server')
+
+ def run(self):
+ if self.distribution.install_requires:
+ self.distribution.fetch_build_eggs(
+ self.distribution.install_requires)
+ if self.distribution.tests_require:
+ self.distribution.fetch_build_eggs(self.distribution.tests_require)
+ if self.client:
+ self.run_client()
+ elif self.server:
+ self.run_server()
+
+ def run_server(self):
+ # We import here to ensure that our setuptools parent has had a chance to
+ # edit the Python system path.
+ from tests.interop import server
+ sys.argv[1:] = self.args.split()
+ server.serve()
+
+ def run_client(self):
+ # We import here to ensure that our setuptools parent has had a chance to
+ # edit the Python system path.
+ from tests.interop import client
+ sys.argv[1:] = self.args.split()
+ client.test_interoperability()
diff --git a/src/python/grpcio_tests/setup.py b/src/python/grpcio_tests/setup.py
index 375fbd6c77..f0407d1a55 100644
--- a/src/python/grpcio_tests/setup.py
+++ b/src/python/grpcio_tests/setup.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""A setup module for the gRPC Python package."""
import os
@@ -48,9 +47,7 @@ import grpc_version
LICENSE = '3-clause BSD'
-PACKAGE_DIRECTORIES = {
- '': '.',
-}
+PACKAGE_DIRECTORIES = {'': '.',}
INSTALL_REQUIRES = (
'coverage>=4.0',
@@ -61,13 +58,11 @@ INSTALL_REQUIRES = (
'grpcio-health-checking>={version}'.format(version=grpc_version.VERSION),
'oauth2client>=1.4.7',
'protobuf>=3.0.0',
- 'six>=1.10',
-)
+ 'six>=1.10',)
COMMAND_CLASS = {
# Run `preprocess` *before* doing any packaging!
'preprocess': commands.GatherProto,
-
'build_package_protos': grpc_tools.command.BuildPackageProtos,
'build_py': commands.BuildPy,
'run_interop': commands.RunInterop,
@@ -80,9 +75,7 @@ PACKAGE_DATA = {
'credentials/server1.key',
'credentials/server1.pem',
],
- 'tests.protoc_plugin.protos.invocation_testing': [
- 'same.proto',
- ],
+ 'tests.protoc_plugin.protos.invocation_testing': ['same.proto',],
'tests.protoc_plugin.protos.invocation_testing.split_messages': [
'messages.proto',
],
@@ -94,9 +87,7 @@ PACKAGE_DATA = {
'credentials/server1.key',
'credentials/server1.pem',
],
- 'tests': [
- 'tests.json'
- ],
+ 'tests': ['tests.json'],
}
TEST_SUITE = 'tests'
@@ -107,16 +98,15 @@ TESTS_REQUIRE = INSTALL_REQUIRES
PACKAGES = setuptools.find_packages('.')
setuptools.setup(
- name='grpcio-tests',
- version=grpc_version.VERSION,
- license=LICENSE,
- packages=list(PACKAGES),
- package_dir=PACKAGE_DIRECTORIES,
- package_data=PACKAGE_DATA,
- install_requires=INSTALL_REQUIRES,
- cmdclass=COMMAND_CLASS,
- tests_require=TESTS_REQUIRE,
- test_suite=TEST_SUITE,
- test_loader=TEST_LOADER,
- test_runner=TEST_RUNNER,
-)
+ name='grpcio-tests',
+ version=grpc_version.VERSION,
+ license=LICENSE,
+ packages=list(PACKAGES),
+ package_dir=PACKAGE_DIRECTORIES,
+ package_data=PACKAGE_DATA,
+ install_requires=INSTALL_REQUIRES,
+ cmdclass=COMMAND_CLASS,
+ tests_require=TESTS_REQUIRE,
+ test_suite=TEST_SUITE,
+ test_loader=TEST_LOADER,
+ test_runner=TEST_RUNNER,)
diff --git a/src/python/grpcio_tests/tests/_loader.py b/src/python/grpcio_tests/tests/_loader.py
index 621bedc7bb..42cf9ab4ca 100644
--- a/src/python/grpcio_tests/tests/_loader.py
+++ b/src/python/grpcio_tests/tests/_loader.py
@@ -40,7 +40,7 @@ TEST_MODULE_REGEX = r'^.*_test$'
class Loader(object):
- """Test loader for setuptools test suite support.
+ """Test loader for setuptools test suite support.
Attributes:
suite (unittest.TestSuite): All tests collected by the loader.
@@ -51,57 +51,57 @@ class Loader(object):
contributes to the test suite.
"""
- def __init__(self):
- self.suite = unittest.TestSuite()
- self.loader = unittest.TestLoader()
- self.module_matcher = re.compile(TEST_MODULE_REGEX)
+ def __init__(self):
+ self.suite = unittest.TestSuite()
+ self.loader = unittest.TestLoader()
+ self.module_matcher = re.compile(TEST_MODULE_REGEX)
- def loadTestsFromNames(self, names, module=None):
- """Function mirroring TestLoader::loadTestsFromNames, as expected by
+ def loadTestsFromNames(self, names, module=None):
+ """Function mirroring TestLoader::loadTestsFromNames, as expected by
setuptools.setup argument `test_loader`."""
- # ensure that we capture decorators and definitions (else our coverage
- # measure unnecessarily suffers)
- coverage_context = coverage.Coverage(data_suffix=True)
- coverage_context.start()
- modules = [importlib.import_module(name) for name in names]
- for module in modules:
- self.visit_module(module)
- for module in modules:
- try:
- package_paths = module.__path__
- except:
- continue
- self.walk_packages(package_paths)
- coverage_context.stop()
- coverage_context.save()
- return self.suite
-
- def walk_packages(self, package_paths):
- """Walks over the packages, dispatching `visit_module` calls.
+ # ensure that we capture decorators and definitions (else our coverage
+ # measure unnecessarily suffers)
+ coverage_context = coverage.Coverage(data_suffix=True)
+ coverage_context.start()
+ modules = [importlib.import_module(name) for name in names]
+ for module in modules:
+ self.visit_module(module)
+ for module in modules:
+ try:
+ package_paths = module.__path__
+ except:
+ continue
+ self.walk_packages(package_paths)
+ coverage_context.stop()
+ coverage_context.save()
+ return self.suite
+
+ def walk_packages(self, package_paths):
+ """Walks over the packages, dispatching `visit_module` calls.
Args:
package_paths (list): A list of paths over which to walk through modules
along.
"""
- for importer, module_name, is_package in (
- pkgutil.walk_packages(package_paths)):
- module = importer.find_module(module_name).load_module(module_name)
- self.visit_module(module)
+ for importer, module_name, is_package in (
+ pkgutil.walk_packages(package_paths)):
+ module = importer.find_module(module_name).load_module(module_name)
+ self.visit_module(module)
- def visit_module(self, module):
- """Visits the module, adding discovered tests to the test suite.
+ def visit_module(self, module):
+ """Visits the module, adding discovered tests to the test suite.
Args:
module (module): Module to match against self.module_matcher; if matched
it has its tests loaded via self.loader into self.suite.
"""
- if self.module_matcher.match(module.__name__):
- module_suite = self.loader.loadTestsFromModule(module)
- self.suite.addTest(module_suite)
+ if self.module_matcher.match(module.__name__):
+ module_suite = self.loader.loadTestsFromModule(module)
+ self.suite.addTest(module_suite)
def iterate_suite_cases(suite):
- """Generator over all unittest.TestCases in a unittest.TestSuite.
+ """Generator over all unittest.TestCases in a unittest.TestSuite.
Args:
suite (unittest.TestSuite): Suite to iterate over in the generator.
@@ -109,11 +109,12 @@ def iterate_suite_cases(suite):
Returns:
generator: A generator over all unittest.TestCases in `suite`.
"""
- for item in suite:
- if isinstance(item, unittest.TestSuite):
- for child_item in iterate_suite_cases(item):
- yield child_item
- elif isinstance(item, unittest.TestCase):
- yield item
- else:
- raise ValueError('unexpected suite item of type {}'.format(type(item)))
+ for item in suite:
+ if isinstance(item, unittest.TestSuite):
+ for child_item in iterate_suite_cases(item):
+ yield child_item
+ elif isinstance(item, unittest.TestCase):
+ yield item
+ else:
+ raise ValueError('unexpected suite item of type {}'.format(
+ type(item)))
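Putting the loader to work mirrors what the `test_lite` command does: import the `tests` package by name, collect its suite, and walk the cases. A short sketch, assuming `tests` is importable:

```python
# Sketch: collect the suite and enumerate its cases, mirroring TestLite.run().
from tests import _loader

loader = _loader.Loader()
suite = loader.loadTestsFromNames(['tests'])
for case in _loader.iterate_suite_cases(suite):
    print(case.id())
```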
diff --git a/src/python/grpcio_tests/tests/_result.py b/src/python/grpcio_tests/tests/_result.py
index 1acec6a9b5..794b7540f1 100644
--- a/src/python/grpcio_tests/tests/_result.py
+++ b/src/python/grpcio_tests/tests/_result.py
@@ -41,9 +41,11 @@ from six import moves
from tests import _loader
-class CaseResult(collections.namedtuple('CaseResult', [
- 'id', 'name', 'kind', 'stdout', 'stderr', 'skip_reason', 'traceback'])):
- """A serializable result of a single test case.
+class CaseResult(
+ collections.namedtuple('CaseResult', [
+ 'id', 'name', 'kind', 'stdout', 'stderr', 'skip_reason', 'traceback'
+ ])):
+ """A serializable result of a single test case.
Attributes:
id (object): Any serializable object used to denote the identity of this
@@ -59,62 +61,78 @@ class CaseResult(collections.namedtuple('CaseResult', [
None.
"""
- class Kind:
- UNTESTED = 'untested'
- RUNNING = 'running'
- ERROR = 'error'
- FAILURE = 'failure'
- SUCCESS = 'success'
- SKIP = 'skip'
- EXPECTED_FAILURE = 'expected failure'
- UNEXPECTED_SUCCESS = 'unexpected success'
-
- def __new__(cls, id=None, name=None, kind=None, stdout=None, stderr=None,
- skip_reason=None, traceback=None):
- """Helper keyword constructor for the namedtuple.
+ class Kind:
+ UNTESTED = 'untested'
+ RUNNING = 'running'
+ ERROR = 'error'
+ FAILURE = 'failure'
+ SUCCESS = 'success'
+ SKIP = 'skip'
+ EXPECTED_FAILURE = 'expected failure'
+ UNEXPECTED_SUCCESS = 'unexpected success'
+
+ def __new__(cls,
+ id=None,
+ name=None,
+ kind=None,
+ stdout=None,
+ stderr=None,
+ skip_reason=None,
+ traceback=None):
+ """Helper keyword constructor for the namedtuple.
See this class' attributes for information on the arguments."""
- assert id is not None
- assert name is None or isinstance(name, str)
- if kind is CaseResult.Kind.UNTESTED:
- pass
- elif kind is CaseResult.Kind.RUNNING:
- pass
- elif kind is CaseResult.Kind.ERROR:
- assert traceback is not None
- elif kind is CaseResult.Kind.FAILURE:
- assert traceback is not None
- elif kind is CaseResult.Kind.SUCCESS:
- pass
- elif kind is CaseResult.Kind.SKIP:
- assert skip_reason is not None
- elif kind is CaseResult.Kind.EXPECTED_FAILURE:
- assert traceback is not None
- elif kind is CaseResult.Kind.UNEXPECTED_SUCCESS:
- pass
- else:
- assert False
- return super(cls, CaseResult).__new__(
- cls, id, name, kind, stdout, stderr, skip_reason, traceback)
-
- def updated(self, name=None, kind=None, stdout=None, stderr=None,
- skip_reason=None, traceback=None):
- """Get a new validated CaseResult with the fields updated.
+ assert id is not None
+ assert name is None or isinstance(name, str)
+ if kind is CaseResult.Kind.UNTESTED:
+ pass
+ elif kind is CaseResult.Kind.RUNNING:
+ pass
+ elif kind is CaseResult.Kind.ERROR:
+ assert traceback is not None
+ elif kind is CaseResult.Kind.FAILURE:
+ assert traceback is not None
+ elif kind is CaseResult.Kind.SUCCESS:
+ pass
+ elif kind is CaseResult.Kind.SKIP:
+ assert skip_reason is not None
+ elif kind is CaseResult.Kind.EXPECTED_FAILURE:
+ assert traceback is not None
+ elif kind is CaseResult.Kind.UNEXPECTED_SUCCESS:
+ pass
+ else:
+ assert False
+ return super(cls, CaseResult).__new__(cls, id, name, kind, stdout,
+ stderr, skip_reason, traceback)
+
+ def updated(self,
+ name=None,
+ kind=None,
+ stdout=None,
+ stderr=None,
+ skip_reason=None,
+ traceback=None):
+ """Get a new validated CaseResult with the fields updated.
See this class' attributes for information on the arguments."""
- name = self.name if name is None else name
- kind = self.kind if kind is None else kind
- stdout = self.stdout if stdout is None else stdout
- stderr = self.stderr if stderr is None else stderr
- skip_reason = self.skip_reason if skip_reason is None else skip_reason
- traceback = self.traceback if traceback is None else traceback
- return CaseResult(id=self.id, name=name, kind=kind, stdout=stdout,
- stderr=stderr, skip_reason=skip_reason,
- traceback=traceback)
+ name = self.name if name is None else name
+ kind = self.kind if kind is None else kind
+ stdout = self.stdout if stdout is None else stdout
+ stderr = self.stderr if stderr is None else stderr
+ skip_reason = self.skip_reason if skip_reason is None else skip_reason
+ traceback = self.traceback if traceback is None else traceback
+ return CaseResult(
+ id=self.id,
+ name=name,
+ kind=kind,
+ stdout=stdout,
+ stderr=stderr,
+ skip_reason=skip_reason,
+ traceback=traceback)
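The keyword constructor and `updated()` are used as a pair while a case moves through its lifecycle; a brief sketch using the definitions above:

```python
# Sketch of the CaseResult lifecycle driven by AugmentedResult below.
running = CaseResult(id='case-1', name='tests.FooTest.test_bar',
                     kind=CaseResult.Kind.RUNNING)
finished = running.updated(kind=CaseResult.Kind.SUCCESS)
assert finished.name == 'tests.FooTest.test_bar'
assert finished.kind is CaseResult.Kind.SUCCESS
```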
class AugmentedResult(unittest.TestResult):
- """unittest.Result that keeps track of additional information.
+ """unittest.Result that keeps track of additional information.
Uses CaseResult objects to store test-case results, providing additional
information beyond that of the standard Python unittest library, such as
@@ -127,228 +145,215 @@ class AugmentedResult(unittest.TestResult):
to CaseResult objects corresponding to those IDs.
"""
- def __init__(self, id_map):
- """Initialize the object with an identifier mapping.
+ def __init__(self, id_map):
+ """Initialize the object with an identifier mapping.
Arguments:
id_map (callable): Corresponds to the attribute `id_map`."""
- super(AugmentedResult, self).__init__()
- self.id_map = id_map
- self.cases = None
-
- def startTestRun(self):
- """See unittest.TestResult.startTestRun."""
- super(AugmentedResult, self).startTestRun()
- self.cases = dict()
-
- def stopTestRun(self):
- """See unittest.TestResult.stopTestRun."""
- super(AugmentedResult, self).stopTestRun()
-
- def startTest(self, test):
- """See unittest.TestResult.startTest."""
- super(AugmentedResult, self).startTest(test)
- case_id = self.id_map(test)
- self.cases[case_id] = CaseResult(
- id=case_id, name=test.id(), kind=CaseResult.Kind.RUNNING)
-
- def addError(self, test, error):
- """See unittest.TestResult.addError."""
- super(AugmentedResult, self).addError(test, error)
- case_id = self.id_map(test)
- self.cases[case_id] = self.cases[case_id].updated(
- kind=CaseResult.Kind.ERROR, traceback=error)
-
- def addFailure(self, test, error):
- """See unittest.TestResult.addFailure."""
- super(AugmentedResult, self).addFailure(test, error)
- case_id = self.id_map(test)
- self.cases[case_id] = self.cases[case_id].updated(
- kind=CaseResult.Kind.FAILURE, traceback=error)
-
- def addSuccess(self, test):
- """See unittest.TestResult.addSuccess."""
- super(AugmentedResult, self).addSuccess(test)
- case_id = self.id_map(test)
- self.cases[case_id] = self.cases[case_id].updated(
- kind=CaseResult.Kind.SUCCESS)
-
- def addSkip(self, test, reason):
- """See unittest.TestResult.addSkip."""
- super(AugmentedResult, self).addSkip(test, reason)
- case_id = self.id_map(test)
- self.cases[case_id] = self.cases[case_id].updated(
- kind=CaseResult.Kind.SKIP, skip_reason=reason)
-
- def addExpectedFailure(self, test, error):
- """See unittest.TestResult.addExpectedFailure."""
- super(AugmentedResult, self).addExpectedFailure(test, error)
- case_id = self.id_map(test)
- self.cases[case_id] = self.cases[case_id].updated(
- kind=CaseResult.Kind.EXPECTED_FAILURE, traceback=error)
-
- def addUnexpectedSuccess(self, test):
- """See unittest.TestResult.addUnexpectedSuccess."""
- super(AugmentedResult, self).addUnexpectedSuccess(test)
- case_id = self.id_map(test)
- self.cases[case_id] = self.cases[case_id].updated(
- kind=CaseResult.Kind.UNEXPECTED_SUCCESS)
-
- def set_output(self, test, stdout, stderr):
- """Set the output attributes for the CaseResult corresponding to a test.
+ super(AugmentedResult, self).__init__()
+ self.id_map = id_map
+ self.cases = None
+
+ def startTestRun(self):
+ """See unittest.TestResult.startTestRun."""
+ super(AugmentedResult, self).startTestRun()
+ self.cases = dict()
+
+ def stopTestRun(self):
+ """See unittest.TestResult.stopTestRun."""
+ super(AugmentedResult, self).stopTestRun()
+
+ def startTest(self, test):
+ """See unittest.TestResult.startTest."""
+ super(AugmentedResult, self).startTest(test)
+ case_id = self.id_map(test)
+ self.cases[case_id] = CaseResult(
+ id=case_id, name=test.id(), kind=CaseResult.Kind.RUNNING)
+
+ def addError(self, test, error):
+ """See unittest.TestResult.addError."""
+ super(AugmentedResult, self).addError(test, error)
+ case_id = self.id_map(test)
+ self.cases[case_id] = self.cases[case_id].updated(
+ kind=CaseResult.Kind.ERROR, traceback=error)
+
+ def addFailure(self, test, error):
+ """See unittest.TestResult.addFailure."""
+ super(AugmentedResult, self).addFailure(test, error)
+ case_id = self.id_map(test)
+ self.cases[case_id] = self.cases[case_id].updated(
+ kind=CaseResult.Kind.FAILURE, traceback=error)
+
+ def addSuccess(self, test):
+ """See unittest.TestResult.addSuccess."""
+ super(AugmentedResult, self).addSuccess(test)
+ case_id = self.id_map(test)
+ self.cases[case_id] = self.cases[case_id].updated(
+ kind=CaseResult.Kind.SUCCESS)
+
+ def addSkip(self, test, reason):
+ """See unittest.TestResult.addSkip."""
+ super(AugmentedResult, self).addSkip(test, reason)
+ case_id = self.id_map(test)
+ self.cases[case_id] = self.cases[case_id].updated(
+ kind=CaseResult.Kind.SKIP, skip_reason=reason)
+
+ def addExpectedFailure(self, test, error):
+ """See unittest.TestResult.addExpectedFailure."""
+ super(AugmentedResult, self).addExpectedFailure(test, error)
+ case_id = self.id_map(test)
+ self.cases[case_id] = self.cases[case_id].updated(
+ kind=CaseResult.Kind.EXPECTED_FAILURE, traceback=error)
+
+ def addUnexpectedSuccess(self, test):
+ """See unittest.TestResult.addUnexpectedSuccess."""
+ super(AugmentedResult, self).addUnexpectedSuccess(test)
+ case_id = self.id_map(test)
+ self.cases[case_id] = self.cases[case_id].updated(
+ kind=CaseResult.Kind.UNEXPECTED_SUCCESS)
+
+ def set_output(self, test, stdout, stderr):
+ """Set the output attributes for the CaseResult corresponding to a test.
Args:
test (unittest.TestCase): The TestCase to set the outputs of.
stdout (str): Output from stdout to assign to self.id_map(test).
stderr (str): Output from stderr to assign to self.id_map(test).
"""
- case_id = self.id_map(test)
- self.cases[case_id] = self.cases[case_id].updated(
- stdout=stdout.decode(), stderr=stderr.decode())
+ case_id = self.id_map(test)
+ self.cases[case_id] = self.cases[case_id].updated(
+ stdout=stdout.decode(), stderr=stderr.decode())
- def augmented_results(self, filter):
- """Convenience method to retrieve filtered case results.
+ def augmented_results(self, filter):
+ """Convenience method to retrieve filtered case results.
Args:
filter (callable): A unary predicate to filter over CaseResult objects.
"""
- return (self.cases[case_id] for case_id in self.cases
- if filter(self.cases[case_id]))
+ return (self.cases[case_id] for case_id in self.cases
+ if filter(self.cases[case_id]))
class CoverageResult(AugmentedResult):
- """Extension to AugmentedResult adding coverage.py support per test.\
+ """Extension to AugmentedResult adding coverage.py support per test.\
Attributes:
coverage_context (coverage.Coverage): coverage.py management object.
"""
- def __init__(self, id_map):
- """See AugmentedResult.__init__."""
- super(CoverageResult, self).__init__(id_map=id_map)
- self.coverage_context = None
+ def __init__(self, id_map):
+ """See AugmentedResult.__init__."""
+ super(CoverageResult, self).__init__(id_map=id_map)
+ self.coverage_context = None
- def startTest(self, test):
- """See unittest.TestResult.startTest.
+ def startTest(self, test):
+ """See unittest.TestResult.startTest.
Additionally initializes and begins code coverage tracking."""
- super(CoverageResult, self).startTest(test)
- self.coverage_context = coverage.Coverage(data_suffix=True)
- self.coverage_context.start()
+ super(CoverageResult, self).startTest(test)
+ self.coverage_context = coverage.Coverage(data_suffix=True)
+ self.coverage_context.start()
- def stopTest(self, test):
- """See unittest.TestResult.stopTest.
+ def stopTest(self, test):
+ """See unittest.TestResult.stopTest.
Additionally stops and deinitializes code coverage tracking."""
- super(CoverageResult, self).stopTest(test)
- self.coverage_context.stop()
- self.coverage_context.save()
- self.coverage_context = None
+ super(CoverageResult, self).stopTest(test)
+ self.coverage_context.stop()
+ self.coverage_context.save()
+ self.coverage_context = None
- def stopTestRun(self):
- """See unittest.TestResult.stopTestRun."""
- super(CoverageResult, self).stopTestRun()
- # TODO(atash): Dig deeper into why the following line fails to properly
- # combine coverage data from the Cython plugin.
- #coverage.Coverage().combine()
+ def stopTestRun(self):
+ """See unittest.TestResult.stopTestRun."""
+ super(CoverageResult, self).stopTestRun()
+ # TODO(atash): Dig deeper into why the following line fails to properly
+ # combine coverage data from the Cython plugin.
+ #coverage.Coverage().combine()
class _Colors:
- """Namespaced constants for terminal color magic numbers."""
- HEADER = '\033[95m'
- INFO = '\033[94m'
- OK = '\033[92m'
- WARN = '\033[93m'
- FAIL = '\033[91m'
- BOLD = '\033[1m'
- UNDERLINE = '\033[4m'
- END = '\033[0m'
+ """Namespaced constants for terminal color magic numbers."""
+ HEADER = '\033[95m'
+ INFO = '\033[94m'
+ OK = '\033[92m'
+ WARN = '\033[93m'
+ FAIL = '\033[91m'
+ BOLD = '\033[1m'
+ UNDERLINE = '\033[4m'
+ END = '\033[0m'
class TerminalResult(CoverageResult):
- """Extension to CoverageResult adding basic terminal reporting."""
+ """Extension to CoverageResult adding basic terminal reporting."""
- def __init__(self, out, id_map):
- """Initialize the result object.
+ def __init__(self, out, id_map):
+ """Initialize the result object.
Args:
out (file-like): Output file to which terminal-colored live results will
be written.
id_map (callable): See AugmentedResult.__init__.
"""
- super(TerminalResult, self).__init__(id_map=id_map)
- self.out = out
-
- def startTestRun(self):
- """See unittest.TestResult.startTestRun."""
- super(TerminalResult, self).startTestRun()
- self.out.write(
- _Colors.HEADER +
- 'Testing gRPC Python...\n' +
- _Colors.END)
-
- def stopTestRun(self):
- """See unittest.TestResult.stopTestRun."""
- super(TerminalResult, self).stopTestRun()
- self.out.write(summary(self))
- self.out.flush()
-
- def addError(self, test, error):
- """See unittest.TestResult.addError."""
- super(TerminalResult, self).addError(test, error)
- self.out.write(
- _Colors.FAIL +
- 'ERROR {}\n'.format(test.id()) +
- _Colors.END)
- self.out.flush()
-
- def addFailure(self, test, error):
- """See unittest.TestResult.addFailure."""
- super(TerminalResult, self).addFailure(test, error)
- self.out.write(
- _Colors.FAIL +
- 'FAILURE {}\n'.format(test.id()) +
- _Colors.END)
- self.out.flush()
-
- def addSuccess(self, test):
- """See unittest.TestResult.addSuccess."""
- super(TerminalResult, self).addSuccess(test)
- self.out.write(
- _Colors.OK +
- 'SUCCESS {}\n'.format(test.id()) +
- _Colors.END)
- self.out.flush()
-
- def addSkip(self, test, reason):
- """See unittest.TestResult.addSkip."""
- super(TerminalResult, self).addSkip(test, reason)
- self.out.write(
- _Colors.INFO +
- 'SKIP {}\n'.format(test.id()) +
- _Colors.END)
- self.out.flush()
-
- def addExpectedFailure(self, test, error):
- """See unittest.TestResult.addExpectedFailure."""
- super(TerminalResult, self).addExpectedFailure(test, error)
- self.out.write(
- _Colors.INFO +
- 'FAILURE_OK {}\n'.format(test.id()) +
- _Colors.END)
- self.out.flush()
-
- def addUnexpectedSuccess(self, test):
- """See unittest.TestResult.addUnexpectedSuccess."""
- super(TerminalResult, self).addUnexpectedSuccess(test)
- self.out.write(
- _Colors.INFO +
- 'UNEXPECTED_OK {}\n'.format(test.id()) +
- _Colors.END)
- self.out.flush()
+ super(TerminalResult, self).__init__(id_map=id_map)
+ self.out = out
+
+ def startTestRun(self):
+ """See unittest.TestResult.startTestRun."""
+ super(TerminalResult, self).startTestRun()
+ self.out.write(_Colors.HEADER + 'Testing gRPC Python...\n' +
+ _Colors.END)
+
+ def stopTestRun(self):
+ """See unittest.TestResult.stopTestRun."""
+ super(TerminalResult, self).stopTestRun()
+ self.out.write(summary(self))
+ self.out.flush()
+
+ def addError(self, test, error):
+ """See unittest.TestResult.addError."""
+ super(TerminalResult, self).addError(test, error)
+ self.out.write(_Colors.FAIL + 'ERROR {}\n'.format(test.id()) +
+ _Colors.END)
+ self.out.flush()
+
+ def addFailure(self, test, error):
+ """See unittest.TestResult.addFailure."""
+ super(TerminalResult, self).addFailure(test, error)
+ self.out.write(_Colors.FAIL + 'FAILURE {}\n'.format(test.id()) +
+ _Colors.END)
+ self.out.flush()
+
+ def addSuccess(self, test):
+ """See unittest.TestResult.addSuccess."""
+ super(TerminalResult, self).addSuccess(test)
+ self.out.write(_Colors.OK + 'SUCCESS {}\n'.format(test.id()) +
+ _Colors.END)
+ self.out.flush()
+
+ def addSkip(self, test, reason):
+ """See unittest.TestResult.addSkip."""
+ super(TerminalResult, self).addSkip(test, reason)
+ self.out.write(_Colors.INFO + 'SKIP {}\n'.format(test.id()) +
+ _Colors.END)
+ self.out.flush()
+
+ def addExpectedFailure(self, test, error):
+ """See unittest.TestResult.addExpectedFailure."""
+ super(TerminalResult, self).addExpectedFailure(test, error)
+ self.out.write(_Colors.INFO + 'FAILURE_OK {}\n'.format(test.id()) +
+ _Colors.END)
+ self.out.flush()
+
+ def addUnexpectedSuccess(self, test):
+ """See unittest.TestResult.addUnexpectedSuccess."""
+ super(TerminalResult, self).addUnexpectedSuccess(test)
+ self.out.write(_Colors.INFO + 'UNEXPECTED_OK {}\n'.format(test.id()) +
+ _Colors.END)
+ self.out.flush()
+
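A `TerminalResult` is driven like any `unittest.TestResult`; the sketch below runs a hypothetical test case against it the same way the runner does (requires the `coverage` package, since `CoverageResult.startTest` opens a `coverage.Coverage` per test):

```python
# Sketch: run a suite against the TerminalResult defined above.
# SomeTestCase is a hypothetical unittest.TestCase used only for illustration.
import sys
import unittest

suite = unittest.TestLoader().loadTestsFromTestCase(SomeTestCase)
result = TerminalResult(sys.stdout, id_map=lambda case: case.id())
result.startTestRun()
suite.run(result)
result.stopTestRun()  # writes summary(result) to sys.stdout
```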
def _traceback_string(type, value, trace):
- """Generate a descriptive string of a Python exception traceback.
+ """Generate a descriptive string of a Python exception traceback.
Args:
type (class): The type of the exception.
@@ -358,12 +363,13 @@ def _traceback_string(type, value, trace):
Returns:
str: Formatted exception descriptive string.
"""
- buffer = moves.cStringIO()
- traceback.print_exception(type, value, trace, file=buffer)
- return buffer.getvalue()
+ buffer = moves.cStringIO()
+ traceback.print_exception(type, value, trace, file=buffer)
+ return buffer.getvalue()
+
def summary(result):
- """A summary string of a result object.
+ """A summary string of a result object.
Args:
result (AugmentedResult): The result object to get the summary of.
@@ -371,62 +377,68 @@ def summary(result):
Returns:
str: The summary string.
"""
- assert isinstance(result, AugmentedResult)
- untested = list(result.augmented_results(
- lambda case_result: case_result.kind is CaseResult.Kind.UNTESTED))
- running = list(result.augmented_results(
- lambda case_result: case_result.kind is CaseResult.Kind.RUNNING))
- failures = list(result.augmented_results(
- lambda case_result: case_result.kind is CaseResult.Kind.FAILURE))
- errors = list(result.augmented_results(
- lambda case_result: case_result.kind is CaseResult.Kind.ERROR))
- successes = list(result.augmented_results(
- lambda case_result: case_result.kind is CaseResult.Kind.SUCCESS))
- skips = list(result.augmented_results(
- lambda case_result: case_result.kind is CaseResult.Kind.SKIP))
- expected_failures = list(result.augmented_results(
- lambda case_result: case_result.kind is CaseResult.Kind.EXPECTED_FAILURE))
- unexpected_successes = list(result.augmented_results(
- lambda case_result: case_result.kind is CaseResult.Kind.UNEXPECTED_SUCCESS))
- running_names = [case.name for case in running]
- finished_count = (len(failures) + len(errors) + len(successes) +
- len(expected_failures) + len(unexpected_successes))
- statistics = (
- '{finished} tests finished:\n'
- '\t{successful} successful\n'
- '\t{unsuccessful} unsuccessful\n'
- '\t{skipped} skipped\n'
- '\t{expected_fail} expected failures\n'
- '\t{unexpected_successful} unexpected successes\n'
- 'Interrupted Tests:\n'
- '\t{interrupted}\n'
- .format(finished=finished_count,
- successful=len(successes),
- unsuccessful=(len(failures)+len(errors)),
- skipped=len(skips),
- expected_fail=len(expected_failures),
- unexpected_successful=len(unexpected_successes),
- interrupted=str(running_names)))
- tracebacks = '\n\n'.join([
- (_Colors.FAIL + '{test_name}' + _Colors.END + '\n' +
- _Colors.BOLD + 'traceback:' + _Colors.END + '\n' +
- '{traceback}\n' +
- _Colors.BOLD + 'stdout:' + _Colors.END + '\n' +
- '{stdout}\n' +
- _Colors.BOLD + 'stderr:' + _Colors.END + '\n' +
- '{stderr}\n').format(
- test_name=result.name,
- traceback=_traceback_string(*result.traceback),
- stdout=result.stdout, stderr=result.stderr)
- for result in itertools.chain(failures, errors)
- ])
- notes = 'Unexpected successes: {}\n'.format([
- result.name for result in unexpected_successes])
- return statistics + '\nErrors/Failures: \n' + tracebacks + '\n' + notes
+ assert isinstance(result, AugmentedResult)
+ untested = list(
+ result.augmented_results(
+ lambda case_result: case_result.kind is CaseResult.Kind.UNTESTED))
+ running = list(
+ result.augmented_results(
+ lambda case_result: case_result.kind is CaseResult.Kind.RUNNING))
+ failures = list(
+ result.augmented_results(
+ lambda case_result: case_result.kind is CaseResult.Kind.FAILURE))
+ errors = list(
+ result.augmented_results(
+ lambda case_result: case_result.kind is CaseResult.Kind.ERROR))
+ successes = list(
+ result.augmented_results(
+ lambda case_result: case_result.kind is CaseResult.Kind.SUCCESS))
+ skips = list(
+ result.augmented_results(
+ lambda case_result: case_result.kind is CaseResult.Kind.SKIP))
+ expected_failures = list(
+ result.augmented_results(
+ lambda case_result: case_result.kind is CaseResult.Kind.EXPECTED_FAILURE
+ ))
+ unexpected_successes = list(
+ result.augmented_results(
+ lambda case_result: case_result.kind is CaseResult.Kind.UNEXPECTED_SUCCESS
+ ))
+ running_names = [case.name for case in running]
+ finished_count = (len(failures) + len(errors) + len(successes) +
+ len(expected_failures) + len(unexpected_successes))
+ statistics = ('{finished} tests finished:\n'
+ '\t{successful} successful\n'
+ '\t{unsuccessful} unsuccessful\n'
+ '\t{skipped} skipped\n'
+ '\t{expected_fail} expected failures\n'
+ '\t{unexpected_successful} unexpected successes\n'
+ 'Interrupted Tests:\n'
+ '\t{interrupted}\n'.format(
+ finished=finished_count,
+ successful=len(successes),
+ unsuccessful=(len(failures) + len(errors)),
+ skipped=len(skips),
+ expected_fail=len(expected_failures),
+ unexpected_successful=len(unexpected_successes),
+ interrupted=str(running_names)))
+ tracebacks = '\n\n'.join(
+ [(_Colors.FAIL + '{test_name}' + _Colors.END + '\n' + _Colors.BOLD +
+ 'traceback:' + _Colors.END + '\n' + '{traceback}\n' + _Colors.BOLD +
+ 'stdout:' + _Colors.END + '\n' + '{stdout}\n' + _Colors.BOLD +
+ 'stderr:' + _Colors.END + '\n' + '{stderr}\n').format(
+ test_name=result.name,
+ traceback=_traceback_string(*result.traceback),
+ stdout=result.stdout,
+ stderr=result.stderr)
+ for result in itertools.chain(failures, errors)])
+ notes = 'Unexpected successes: {}\n'.format(
+ [result.name for result in unexpected_successes])
+ return statistics + '\nErrors/Failures: \n' + tracebacks + '\n' + notes
def jenkins_junit_xml(result):
- """An XML tree object that when written is recognizable by Jenkins.
+ """An XML tree object that when written is recognizable by Jenkins.
Args:
result (AugmentedResult): The result object to get the junit xml output of.
@@ -434,20 +446,18 @@ def jenkins_junit_xml(result):
Returns:
ElementTree.ElementTree: The XML tree.
"""
- assert isinstance(result, AugmentedResult)
- root = ElementTree.Element('testsuites')
- suite = ElementTree.SubElement(root, 'testsuite', {
- 'name': 'Python gRPC tests',
- })
- for case in result.cases.values():
- if case.kind is CaseResult.Kind.SUCCESS:
- ElementTree.SubElement(suite, 'testcase', {
- 'name': case.name,
- })
- elif case.kind in (CaseResult.Kind.ERROR, CaseResult.Kind.FAILURE):
- case_xml = ElementTree.SubElement(suite, 'testcase', {
- 'name': case.name,
- })
- error_xml = ElementTree.SubElement(case_xml, 'error', {})
- error_xml.text = ''.format(case.stderr, case.traceback)
- return ElementTree.ElementTree(element=root)
+ assert isinstance(result, AugmentedResult)
+ root = ElementTree.Element('testsuites')
+ suite = ElementTree.SubElement(root, 'testsuite', {
+ 'name': 'Python gRPC tests',
+ })
+ for case in result.cases.values():
+ if case.kind is CaseResult.Kind.SUCCESS:
+ ElementTree.SubElement(suite, 'testcase', {'name': case.name,})
+ elif case.kind in (CaseResult.Kind.ERROR, CaseResult.Kind.FAILURE):
+ case_xml = ElementTree.SubElement(suite, 'testcase', {
+ 'name': case.name,
+ })
+ error_xml = ElementTree.SubElement(case_xml, 'error', {})
+            error_xml.text = '{}\n{}'.format(case.stderr, case.traceback)
+ return ElementTree.ElementTree(element=root)
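Writing the tree out is a two-liner, mirrored verbatim in `Runner.run()` below; `result` here stands for any finished `AugmentedResult`:

```python
# Sketch: serialize a finished AugmentedResult for Jenkins.
with open('report.xml', 'wb') as report_xml_file:
    jenkins_junit_xml(result).write(report_xml_file)
```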
diff --git a/src/python/grpcio_tests/tests/_runner.py b/src/python/grpcio_tests/tests/_runner.py
index 926dcbe23a..59964b271c 100644
--- a/src/python/grpcio_tests/tests/_runner.py
+++ b/src/python/grpcio_tests/tests/_runner.py
@@ -49,7 +49,7 @@ from tests import _result
class CaptureFile(object):
- """A context-managed file to redirect output to a byte array.
+ """A context-managed file to redirect output to a byte array.
Use by invoking `start` (`__enter__`) and at some point invoking `stop`
(`__exit__`). At any point after the initial call to `start` call `output` to
@@ -66,57 +66,56 @@ class CaptureFile(object):
Only non-None when self is started.
"""
- def __init__(self, fd):
- self._redirected_fd = fd
- self._saved_fd = os.dup(self._redirected_fd)
- self._into_file = None
+ def __init__(self, fd):
+ self._redirected_fd = fd
+ self._saved_fd = os.dup(self._redirected_fd)
+ self._into_file = None
- def output(self):
- """Get all output from the redirected-to file if it exists."""
- if self._into_file:
- self._into_file.seek(0)
- return bytes(self._into_file.read())
- else:
- return bytes()
+ def output(self):
+ """Get all output from the redirected-to file if it exists."""
+ if self._into_file:
+ self._into_file.seek(0)
+ return bytes(self._into_file.read())
+ else:
+ return bytes()
- def start(self):
- """Start redirection of writes to the file descriptor."""
- self._into_file = tempfile.TemporaryFile()
- os.dup2(self._into_file.fileno(), self._redirected_fd)
+ def start(self):
+ """Start redirection of writes to the file descriptor."""
+ self._into_file = tempfile.TemporaryFile()
+ os.dup2(self._into_file.fileno(), self._redirected_fd)
- def stop(self):
- """Stop redirection of writes to the file descriptor."""
- # n.b. this dup2 call auto-closes self._redirected_fd
- os.dup2(self._saved_fd, self._redirected_fd)
+ def stop(self):
+ """Stop redirection of writes to the file descriptor."""
+ # n.b. this dup2 call auto-closes self._redirected_fd
+ os.dup2(self._saved_fd, self._redirected_fd)
- def write_bypass(self, value):
- """Bypass the redirection and write directly to the original file.
+ def write_bypass(self, value):
+ """Bypass the redirection and write directly to the original file.
Arguments:
value (str): What to write to the original file.
"""
- if six.PY3 and not isinstance(value, six.binary_type):
- value = bytes(value, 'ascii')
- if self._saved_fd is None:
- os.write(self._redirect_fd, value)
- else:
- os.write(self._saved_fd, value)
+ if six.PY3 and not isinstance(value, six.binary_type):
+ value = bytes(value, 'ascii')
+ if self._saved_fd is None:
+            os.write(self._redirected_fd, value)
+ else:
+ os.write(self._saved_fd, value)
- def __enter__(self):
- self.start()
- return self
+ def __enter__(self):
+ self.start()
+ return self
- def __exit__(self, type, value, traceback):
- self.stop()
+ def __exit__(self, type, value, traceback):
+ self.stop()
- def close(self):
- """Close any resources used by self not closed by stop()."""
- os.close(self._saved_fd)
+ def close(self):
+ """Close any resources used by self not closed by stop()."""
+ os.close(self._saved_fd)
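`CaptureFile` redirects at the file-descriptor level, so anything that reaches fd 1 while it is started lands in the temporary file; a minimal sketch using the class above:

```python
# Sketch of the CaptureFile redirection pattern.
import sys

capture = CaptureFile(sys.stdout.fileno())
with capture:
    sys.stdout.write('captured\n')
    sys.stdout.flush()  # make sure the bytes hit fd 1 before stop() restores it
print(capture.output().decode())  # -> captured
capture.close()
```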
-class AugmentedCase(collections.namedtuple('AugmentedCase', [
- 'case', 'id'])):
- """A test case with a guaranteed unique externally specified identifier.
+class AugmentedCase(collections.namedtuple('AugmentedCase', ['case', 'id'])):
+ """A test case with a guaranteed unique externally specified identifier.
Attributes:
case (unittest.TestCase): TestCase we're decorating with an additional
@@ -125,105 +124,107 @@ class AugmentedCase(collections.namedtuple('AugmentedCase', [
purposes.
"""
- def __new__(cls, case, id=None):
- if id is None:
- id = uuid.uuid4()
- return super(cls, AugmentedCase).__new__(cls, case, id)
+ def __new__(cls, case, id=None):
+ if id is None:
+ id = uuid.uuid4()
+ return super(cls, AugmentedCase).__new__(cls, case, id)
class Runner(object):
- def run(self, suite):
- """See setuptools' test_runner setup argument for information."""
- # only run test cases with id starting with given prefix
- testcase_filter = os.getenv('GRPC_PYTHON_TESTRUNNER_FILTER')
- filtered_cases = []
- for case in _loader.iterate_suite_cases(suite):
- if not testcase_filter or case.id().startswith(testcase_filter):
- filtered_cases.append(case)
-
- # Ensure that every test case has no collision with any other test case in
- # the augmented results.
- augmented_cases = [AugmentedCase(case, uuid.uuid4())
- for case in filtered_cases]
- case_id_by_case = dict((augmented_case.case, augmented_case.id)
- for augmented_case in augmented_cases)
- result_out = moves.cStringIO()
- result = _result.TerminalResult(
- result_out, id_map=lambda case: case_id_by_case[case])
- stdout_pipe = CaptureFile(sys.stdout.fileno())
- stderr_pipe = CaptureFile(sys.stderr.fileno())
- kill_flag = [False]
-
- def sigint_handler(signal_number, frame):
- if signal_number == signal.SIGINT:
- kill_flag[0] = True # Python 2.7 not having 'local'... :-(
- signal.signal(signal_number, signal.SIG_DFL)
-
- def fault_handler(signal_number, frame):
- stdout_pipe.write_bypass(
- 'Received fault signal {}\nstdout:\n{}\n\nstderr:{}\n'
- .format(signal_number, stdout_pipe.output(),
- stderr_pipe.output()))
- os._exit(1)
-
- def check_kill_self():
- if kill_flag[0]:
- stdout_pipe.write_bypass('Stopping tests short...')
- result.stopTestRun()
- stdout_pipe.write_bypass(result_out.getvalue())
- stdout_pipe.write_bypass(
- '\ninterrupted stdout:\n{}\n'.format(stdout_pipe.output().decode()))
- stderr_pipe.write_bypass(
- '\ninterrupted stderr:\n{}\n'.format(stderr_pipe.output().decode()))
- os._exit(1)
- def try_set_handler(name, handler):
- try:
- signal.signal(getattr(signal, name), handler)
- except AttributeError:
- pass
- try_set_handler('SIGINT', sigint_handler)
- try_set_handler('SIGSEGV', fault_handler)
- try_set_handler('SIGBUS', fault_handler)
- try_set_handler('SIGABRT', fault_handler)
- try_set_handler('SIGFPE', fault_handler)
- try_set_handler('SIGILL', fault_handler)
- # Sometimes output will lag after a test has successfully finished; we
- # ignore such writes to our pipes.
- try_set_handler('SIGPIPE', signal.SIG_IGN)
-
- # Run the tests
- result.startTestRun()
- for augmented_case in augmented_cases:
- sys.stdout.write('Running {}\n'.format(augmented_case.case.id()))
- sys.stdout.flush()
- case_thread = threading.Thread(
- target=augmented_case.case.run, args=(result,))
- try:
- with stdout_pipe, stderr_pipe:
- case_thread.start()
- while case_thread.is_alive():
+ def run(self, suite):
+ """See setuptools' test_runner setup argument for information."""
+ # only run test cases with id starting with given prefix
+ testcase_filter = os.getenv('GRPC_PYTHON_TESTRUNNER_FILTER')
+ filtered_cases = []
+ for case in _loader.iterate_suite_cases(suite):
+ if not testcase_filter or case.id().startswith(testcase_filter):
+ filtered_cases.append(case)
+
+ # Ensure that every test case has no collision with any other test case in
+ # the augmented results.
+ augmented_cases = [
+ AugmentedCase(case, uuid.uuid4()) for case in filtered_cases
+ ]
+ case_id_by_case = dict((augmented_case.case, augmented_case.id)
+ for augmented_case in augmented_cases)
+ result_out = moves.cStringIO()
+ result = _result.TerminalResult(
+ result_out, id_map=lambda case: case_id_by_case[case])
+ stdout_pipe = CaptureFile(sys.stdout.fileno())
+ stderr_pipe = CaptureFile(sys.stderr.fileno())
+ kill_flag = [False]
+
+ def sigint_handler(signal_number, frame):
+ if signal_number == signal.SIGINT:
+                kill_flag[0] = True  # Python 2.7 not having 'nonlocal'... :-(
+ signal.signal(signal_number, signal.SIG_DFL)
+
+ def fault_handler(signal_number, frame):
+ stdout_pipe.write_bypass(
+ 'Received fault signal {}\nstdout:\n{}\n\nstderr:{}\n'.format(
+ signal_number, stdout_pipe.output(), stderr_pipe.output()))
+ os._exit(1)
+
+ def check_kill_self():
+ if kill_flag[0]:
+ stdout_pipe.write_bypass('Stopping tests short...')
+ result.stopTestRun()
+ stdout_pipe.write_bypass(result_out.getvalue())
+ stdout_pipe.write_bypass('\ninterrupted stdout:\n{}\n'.format(
+ stdout_pipe.output().decode()))
+ stderr_pipe.write_bypass('\ninterrupted stderr:\n{}\n'.format(
+ stderr_pipe.output().decode()))
+ os._exit(1)
+
+ def try_set_handler(name, handler):
+ try:
+ signal.signal(getattr(signal, name), handler)
+ except AttributeError:
+ pass
+
+ try_set_handler('SIGINT', sigint_handler)
+ try_set_handler('SIGSEGV', fault_handler)
+ try_set_handler('SIGBUS', fault_handler)
+ try_set_handler('SIGABRT', fault_handler)
+ try_set_handler('SIGFPE', fault_handler)
+ try_set_handler('SIGILL', fault_handler)
+ # Sometimes output will lag after a test has successfully finished; we
+ # ignore such writes to our pipes.
+ try_set_handler('SIGPIPE', signal.SIG_IGN)
+
+ # Run the tests
+ result.startTestRun()
+ for augmented_case in augmented_cases:
+ sys.stdout.write('Running {}\n'.format(augmented_case.case.id(
+ )))
+ sys.stdout.flush()
+ case_thread = threading.Thread(
+ target=augmented_case.case.run, args=(result,))
+ try:
+ with stdout_pipe, stderr_pipe:
+ case_thread.start()
+ while case_thread.is_alive():
+ check_kill_self()
+ time.sleep(0)
+ case_thread.join()
+ except:
+ # re-raise the exception after forcing the with-block to end
+ raise
+ result.set_output(augmented_case.case,
+ stdout_pipe.output(), stderr_pipe.output())
+ sys.stdout.write(result_out.getvalue())
+ sys.stdout.flush()
+ result_out.truncate(0)
check_kill_self()
- time.sleep(0)
- case_thread.join()
- except:
- # re-raise the exception after forcing the with-block to end
- raise
- result.set_output(
- augmented_case.case, stdout_pipe.output(), stderr_pipe.output())
- sys.stdout.write(result_out.getvalue())
- sys.stdout.flush()
- result_out.truncate(0)
- check_kill_self()
- result.stopTestRun()
- stdout_pipe.close()
- stderr_pipe.close()
-
- # Report results
- sys.stdout.write(result_out.getvalue())
- sys.stdout.flush()
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- with open('report.xml', 'wb') as report_xml_file:
- _result.jenkins_junit_xml(result).write(report_xml_file)
- return result
-
+ result.stopTestRun()
+ stdout_pipe.close()
+ stderr_pipe.close()
+
+ # Report results
+ sys.stdout.write(result_out.getvalue())
+ sys.stdout.flush()
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ with open('report.xml', 'wb') as report_xml_file:
+ _result.jenkins_junit_xml(result).write(report_xml_file)
+ return result
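
The new runner keeps the old interrupt strategy while reformatting it: a one-element list (kill_flag) is flipped inside the SIGINT handler because Python 2.7 lacks 'nonlocal', and check_kill_self() polls it while each test case runs on its own thread. Below is a minimal standalone sketch of that pattern, separate from the diff itself; run_with_interrupt and do_work are illustrative names, not part of the gRPC code.

import signal
import threading
import time


def run_with_interrupt(do_work):
    """Runs do_work on a worker thread while polling a SIGINT-set flag."""
    kill_flag = [False]  # one-element list: Python 2.7 has no 'nonlocal'

    def sigint_handler(signal_number, frame):
        kill_flag[0] = True
        signal.signal(signal_number, signal.SIG_DFL)  # restore default handler

    signal.signal(signal.SIGINT, sigint_handler)
    worker = threading.Thread(target=do_work)
    worker.start()
    while worker.is_alive():
        if kill_flag[0]:
            print('Stopping short...')
            return False
        time.sleep(0)  # yield to other threads, as the runner's loop does
    worker.join()
    return True


if __name__ == '__main__':
    run_with_interrupt(lambda: time.sleep(1))
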
diff --git a/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py
index 5dde72b169..363b4c5f99 100644
--- a/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py
+++ b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests of grpc_health.v1.health."""
import unittest
@@ -41,55 +40,55 @@ from tests.unit.framework.common import test_constants
class HealthServicerTest(unittest.TestCase):
- def setUp(self):
- servicer = health.HealthServicer()
- servicer.set('', health_pb2.HealthCheckResponse.SERVING)
- servicer.set('grpc.test.TestServiceServing',
- health_pb2.HealthCheckResponse.SERVING)
- servicer.set('grpc.test.TestServiceUnknown',
- health_pb2.HealthCheckResponse.UNKNOWN)
- servicer.set('grpc.test.TestServiceNotServing',
- health_pb2.HealthCheckResponse.NOT_SERVING)
- server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- self._server = grpc.server(server_pool)
- port = self._server.add_insecure_port('[::]:0')
- health_pb2.add_HealthServicer_to_server(servicer, self._server)
- self._server.start()
+ def setUp(self):
+ servicer = health.HealthServicer()
+ servicer.set('', health_pb2.HealthCheckResponse.SERVING)
+ servicer.set('grpc.test.TestServiceServing',
+ health_pb2.HealthCheckResponse.SERVING)
+ servicer.set('grpc.test.TestServiceUnknown',
+ health_pb2.HealthCheckResponse.UNKNOWN)
+ servicer.set('grpc.test.TestServiceNotServing',
+ health_pb2.HealthCheckResponse.NOT_SERVING)
+ server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ self._server = grpc.server(server_pool)
+ port = self._server.add_insecure_port('[::]:0')
+ health_pb2.add_HealthServicer_to_server(servicer, self._server)
+ self._server.start()
+
+ channel = grpc.insecure_channel('localhost:%d' % port)
+ self._stub = health_pb2.HealthStub(channel)
- channel = grpc.insecure_channel('localhost:%d' % port)
- self._stub = health_pb2.HealthStub(channel)
+ def test_empty_service(self):
+ request = health_pb2.HealthCheckRequest()
+ resp = self._stub.Check(request)
+ self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status)
- def test_empty_service(self):
- request = health_pb2.HealthCheckRequest()
- resp = self._stub.Check(request)
- self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status)
+ def test_serving_service(self):
+ request = health_pb2.HealthCheckRequest(
+ service='grpc.test.TestServiceServing')
+ resp = self._stub.Check(request)
+ self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status)
- def test_serving_service(self):
- request = health_pb2.HealthCheckRequest(
- service='grpc.test.TestServiceServing')
- resp = self._stub.Check(request)
- self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status)
+ def test_unknown_service(self):
+ request = health_pb2.HealthCheckRequest(
+ service='grpc.test.TestServiceUnknown')
+ resp = self._stub.Check(request)
+ self.assertEqual(health_pb2.HealthCheckResponse.UNKNOWN, resp.status)
- def test_unknown_serivce(self):
- request = health_pb2.HealthCheckRequest(
- service='grpc.test.TestServiceUnknown')
- resp = self._stub.Check(request)
- self.assertEqual(health_pb2.HealthCheckResponse.UNKNOWN, resp.status)
+ def test_not_serving_service(self):
+ request = health_pb2.HealthCheckRequest(
+ service='grpc.test.TestServiceNotServing')
+ resp = self._stub.Check(request)
+ self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
+ resp.status)
- def test_not_serving_service(self):
- request = health_pb2.HealthCheckRequest(
- service='grpc.test.TestServiceNotServing')
- resp = self._stub.Check(request)
- self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING, resp.status)
+ def test_not_found_service(self):
+ request = health_pb2.HealthCheckRequest(service='not-found')
+ with self.assertRaises(grpc.RpcError) as context:
+ resp = self._stub.Check(request)
- def test_not_found_service(self):
- request = health_pb2.HealthCheckRequest(
- service='not-found')
- with self.assertRaises(grpc.RpcError) as context:
- resp = self._stub.Check(request)
-
- self.assertEqual(grpc.StatusCode.NOT_FOUND, context.exception.code())
+ self.assertEqual(grpc.StatusCode.NOT_FOUND, context.exception.code())
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/http2/_negative_http2_client.py b/src/python/grpcio_tests/tests/http2/_negative_http2_client.py
deleted file mode 100644
index f8604683b3..0000000000
--- a/src/python/grpcio_tests/tests/http2/_negative_http2_client.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# Copyright 2016, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""The Python client used to test negative http2 conditions."""
-
-import argparse
-
-import grpc
-from src.proto.grpc.testing import test_pb2
-from src.proto.grpc.testing import messages_pb2
-
-def _validate_payload_type_and_length(response, expected_type, expected_length):
- if response.payload.type is not expected_type:
- raise ValueError(
- 'expected payload type %s, got %s' %
- (expected_type, type(response.payload.type)))
- elif len(response.payload.body) != expected_length:
- raise ValueError(
- 'expected payload body size %d, got %d' %
- (expected_length, len(response.payload.body)))
-
-def _expect_status_code(call, expected_code):
- if call.code() != expected_code:
- raise ValueError(
- 'expected code %s, got %s' % (expected_code, call.code()))
-
-def _expect_status_details(call, expected_details):
- if call.details() != expected_details:
- raise ValueError(
- 'expected message %s, got %s' % (expected_details, call.details()))
-
-def _validate_status_code_and_details(call, expected_code, expected_details):
- _expect_status_code(call, expected_code)
- _expect_status_details(call, expected_details)
-
-# common requests
-_REQUEST_SIZE = 314159
-_RESPONSE_SIZE = 271828
-
-_SIMPLE_REQUEST = messages_pb2.SimpleRequest(
- response_type=messages_pb2.COMPRESSABLE,
- response_size=_RESPONSE_SIZE,
- payload=messages_pb2.Payload(body=b'\x00' * _REQUEST_SIZE))
-
-def _goaway(stub):
- first_response = stub.UnaryCall(_SIMPLE_REQUEST)
- _validate_payload_type_and_length(first_response,
- messages_pb2.COMPRESSABLE, _RESPONSE_SIZE)
- second_response = stub.UnaryCall(_SIMPLE_REQUEST)
- _validate_payload_type_and_length(second_response,
- messages_pb2.COMPRESSABLE, _RESPONSE_SIZE)
-
-def _rst_after_header(stub):
- resp_future = stub.UnaryCall.future(_SIMPLE_REQUEST)
- _validate_status_code_and_details(resp_future, grpc.StatusCode.UNAVAILABLE, "")
-
-def _rst_during_data(stub):
- resp_future = stub.UnaryCall.future(_SIMPLE_REQUEST)
- _validate_status_code_and_details(resp_future, grpc.StatusCode.UNKNOWN, "")
-
-def _rst_after_data(stub):
- resp_future = stub.UnaryCall.future(_SIMPLE_REQUEST)
- _validate_payload_type_and_length(next(resp_future),
- messages_pb2.COMPRESSABLE, _RESPONSE_SIZE)
- _validate_status_code_and_details(resp_future, grpc.StatusCode.UNKNOWN, "")
-
-def _ping(stub):
- response = stub.UnaryCall(_SIMPLE_REQUEST)
- _validate_payload_type_and_length(response,
- messages_pb2.COMPRESSABLE, _RESPONSE_SIZE)
-
-def _max_streams(stub):
- # send one req to ensure server sets MAX_STREAMS
- response = stub.UnaryCall(_SIMPLE_REQUEST)
- _validate_payload_type_and_length(response,
- messages_pb2.COMPRESSABLE, _RESPONSE_SIZE)
-
- # give the streams a workout
- futures = []
- for _ in range(15):
- futures.append(stub.UnaryCall.future(_SIMPLE_REQUEST))
- for future in futures:
- _validate_payload_type_and_length(future.result(),
- messages_pb2.COMPRESSABLE, _RESPONSE_SIZE)
-
-def _run_test_case(test_case, stub):
- if test_case == 'goaway':
- _goaway(stub)
- elif test_case == 'rst_after_header':
- _rst_after_header(stub)
- elif test_case == 'rst_during_data':
- _rst_during_data(stub)
- elif test_case == 'rst_after_data':
- _rst_after_data(stub)
- elif test_case =='ping':
- _ping(stub)
- elif test_case == 'max_streams':
- _max_streams(stub)
- else:
- raise ValueError("Invalid test case: %s" % test_case)
-
-def _args():
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--server_host', help='the host to which to connect', type=str,
- default="127.0.0.1")
- parser.add_argument(
- '--server_port', help='the port to which to connect', type=int,
- default="8080")
- parser.add_argument(
- '--test_case', help='the test case to execute', type=str,
- default="goaway")
- return parser.parse_args()
-
-def _stub(server_host, server_port):
- target = '{}:{}'.format(server_host, server_port)
- channel = grpc.insecure_channel(target)
- return test_pb2.TestServiceStub(channel)
-
-def main():
- args = _args()
- stub = _stub(args.server_host, args.server_port)
- _run_test_case(args.test_case, stub)
-
-
-if __name__ == '__main__':
- main()
diff --git a/src/python/grpcio_tests/tests/http2/negative_http2_client.py b/src/python/grpcio_tests/tests/http2/negative_http2_client.py
new file mode 100644
index 0000000000..b8adf093a5
--- /dev/null
+++ b/src/python/grpcio_tests/tests/http2/negative_http2_client.py
@@ -0,0 +1,175 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""The Python client used to test negative http2 conditions."""
+
+import argparse
+
+import grpc
+from src.proto.grpc.testing import test_pb2
+from src.proto.grpc.testing import messages_pb2
+
+
+def _validate_payload_type_and_length(response, expected_type, expected_length):
+ if response.payload.type is not expected_type:
+ raise ValueError('expected payload type %s, got %s' %
+ (expected_type, type(response.payload.type)))
+ elif len(response.payload.body) != expected_length:
+ raise ValueError('expected payload body size %d, got %d' %
+ (expected_length, len(response.payload.body)))
+
+
+def _expect_status_code(call, expected_code):
+ if call.code() != expected_code:
+ raise ValueError('expected code %s, got %s' %
+ (expected_code, call.code()))
+
+
+def _expect_status_details(call, expected_details):
+ if call.details() != expected_details:
+ raise ValueError('expected message %s, got %s' %
+ (expected_details, call.details()))
+
+
+def _validate_status_code_and_details(call, expected_code, expected_details):
+ _expect_status_code(call, expected_code)
+ _expect_status_details(call, expected_details)
+
+
+# common requests
+_REQUEST_SIZE = 314159
+_RESPONSE_SIZE = 271828
+
+_SIMPLE_REQUEST = messages_pb2.SimpleRequest(
+ response_type=messages_pb2.COMPRESSABLE,
+ response_size=_RESPONSE_SIZE,
+ payload=messages_pb2.Payload(body=b'\x00' * _REQUEST_SIZE))
+
+
+def _goaway(stub):
+ first_response = stub.UnaryCall(_SIMPLE_REQUEST)
+ _validate_payload_type_and_length(first_response, messages_pb2.COMPRESSABLE,
+ _RESPONSE_SIZE)
+ second_response = stub.UnaryCall(_SIMPLE_REQUEST)
+ _validate_payload_type_and_length(second_response,
+ messages_pb2.COMPRESSABLE, _RESPONSE_SIZE)
+
+
+def _rst_after_header(stub):
+ resp_future = stub.UnaryCall.future(_SIMPLE_REQUEST)
+ _validate_status_code_and_details(resp_future, grpc.StatusCode.INTERNAL,
+ "Received RST_STREAM with error code 0")
+
+
+def _rst_during_data(stub):
+ resp_future = stub.UnaryCall.future(_SIMPLE_REQUEST)
+ _validate_status_code_and_details(resp_future, grpc.StatusCode.INTERNAL,
+ "Received RST_STREAM with error code 0")
+
+
+def _rst_after_data(stub):
+ resp_future = stub.UnaryCall.future(_SIMPLE_REQUEST)
+ _validate_payload_type_and_length(
+ next(resp_future), messages_pb2.COMPRESSABLE, _RESPONSE_SIZE)
+ _validate_status_code_and_details(resp_future, grpc.StatusCode.INTERNAL,
+ "Received RST_STREAM with error code 0")
+
+
+def _ping(stub):
+ response = stub.UnaryCall(_SIMPLE_REQUEST)
+ _validate_payload_type_and_length(response, messages_pb2.COMPRESSABLE,
+ _RESPONSE_SIZE)
+
+
+def _max_streams(stub):
+ # send one req to ensure server sets MAX_STREAMS
+ response = stub.UnaryCall(_SIMPLE_REQUEST)
+ _validate_payload_type_and_length(response, messages_pb2.COMPRESSABLE,
+ _RESPONSE_SIZE)
+
+ # give the streams a workout
+ futures = []
+ for _ in range(15):
+ futures.append(stub.UnaryCall.future(_SIMPLE_REQUEST))
+ for future in futures:
+ _validate_payload_type_and_length(
+ future.result(), messages_pb2.COMPRESSABLE, _RESPONSE_SIZE)
+
+
+def _run_test_case(test_case, stub):
+ if test_case == 'goaway':
+ _goaway(stub)
+ elif test_case == 'rst_after_header':
+ _rst_after_header(stub)
+ elif test_case == 'rst_during_data':
+ _rst_during_data(stub)
+ elif test_case == 'rst_after_data':
+ _rst_after_data(stub)
+ elif test_case == 'ping':
+ _ping(stub)
+ elif test_case == 'max_streams':
+ _max_streams(stub)
+ else:
+ raise ValueError("Invalid test case: %s" % test_case)
+
+
+def _args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '--server_host',
+ help='the host to which to connect',
+ type=str,
+ default="127.0.0.1")
+ parser.add_argument(
+ '--server_port',
+ help='the port to which to connect',
+ type=int,
+ default="8080")
+ parser.add_argument(
+ '--test_case',
+ help='the test case to execute',
+ type=str,
+ default="goaway")
+ return parser.parse_args()
+
+
+def _stub(server_host, server_port):
+ target = '{}:{}'.format(server_host, server_port)
+ channel = grpc.insecure_channel(target)
+ grpc.channel_ready_future(channel).result()
+ return test_pb2.TestServiceStub(channel)
+
+
+def main():
+ args = _args()
+ stub = _stub(args.server_host, args.server_port)
+ _run_test_case(args.test_case, stub)
+
+
+if __name__ == '__main__':
+ main()
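
Besides the rename and reformatting, the new client differs from the deleted module in two visible ways: _stub() now blocks on grpc.channel_ready_future(channel).result() before returning the stub, and the RST_STREAM cases now expect grpc.StatusCode.INTERNAL with the details "Received RST_STREAM with error code 0" instead of UNAVAILABLE/UNKNOWN with empty details. A minimal sketch of the readiness wait in isolation follows; the target address and timeout are illustrative values, not taken from the diff.

import grpc


def ready_channel(target, timeout_seconds=10):
    """Opens an insecure channel and blocks until it is connected."""
    channel = grpc.insecure_channel(target)
    # channel_ready_future returns a Future that resolves once the channel
    # reaches the READY connectivity state; result() raises on timeout.
    grpc.channel_ready_future(channel).result(timeout=timeout_seconds)
    return channel


if __name__ == '__main__':
    # Requires a server listening on the target; otherwise result() times out.
    channel = ready_channel('127.0.0.1:8080')
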
diff --git a/src/python/grpcio_tests/tests/interop/__init__.py b/src/python/grpcio_tests/tests/interop/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/interop/__init__.py
+++ b/src/python/grpcio_tests/tests/interop/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py b/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py
index 4fb22b4d9d..58f3b364ba 100644
--- a/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py
+++ b/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Insecure client-server interoperability as a unit test."""
from concurrent import futures
@@ -40,19 +39,18 @@ from tests.interop import methods
from tests.interop import server
-class InsecureIntraopTest(
- _intraop_test_case.IntraopTestCase,
- unittest.TestCase):
+class InsecureIntraopTest(_intraop_test_case.IntraopTestCase,
+ unittest.TestCase):
- def setUp(self):
- self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
- test_pb2.add_TestServiceServicer_to_server(
- methods.TestService(), self.server)
- port = self.server.add_insecure_port('[::]:0')
- self.server.start()
- self.stub = test_pb2.TestServiceStub(
- grpc.insecure_channel('localhost:{}'.format(port)))
+ def setUp(self):
+ self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+ test_pb2.add_TestServiceServicer_to_server(methods.TestService(),
+ self.server)
+ port = self.server.add_insecure_port('[::]:0')
+ self.server.start()
+ self.stub = test_pb2.TestServiceStub(
+ grpc.insecure_channel('localhost:{}'.format(port)))
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/interop/_intraop_test_case.py b/src/python/grpcio_tests/tests/interop/_intraop_test_case.py
index fe1c173992..424f93980c 100644
--- a/src/python/grpcio_tests/tests/interop/_intraop_test_case.py
+++ b/src/python/grpcio_tests/tests/interop/_intraop_test_case.py
@@ -26,39 +26,41 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Common code for unit tests of the interoperability test code."""
from tests.interop import methods
class IntraopTestCase(object):
- """Unit test methods.
+ """Unit test methods.
This class must be mixed in with unittest.TestCase and a class that defines
setUp and tearDown methods that manage a stub attribute.
"""
- def testEmptyUnary(self):
- methods.TestCase.EMPTY_UNARY.test_interoperability(self.stub, None)
+ def testEmptyUnary(self):
+ methods.TestCase.EMPTY_UNARY.test_interoperability(self.stub, None)
- def testLargeUnary(self):
- methods.TestCase.LARGE_UNARY.test_interoperability(self.stub, None)
+ def testLargeUnary(self):
+ methods.TestCase.LARGE_UNARY.test_interoperability(self.stub, None)
- def testServerStreaming(self):
- methods.TestCase.SERVER_STREAMING.test_interoperability(self.stub, None)
+ def testServerStreaming(self):
+ methods.TestCase.SERVER_STREAMING.test_interoperability(self.stub, None)
- def testClientStreaming(self):
- methods.TestCase.CLIENT_STREAMING.test_interoperability(self.stub, None)
+ def testClientStreaming(self):
+ methods.TestCase.CLIENT_STREAMING.test_interoperability(self.stub, None)
- def testPingPong(self):
- methods.TestCase.PING_PONG.test_interoperability(self.stub, None)
+ def testPingPong(self):
+ methods.TestCase.PING_PONG.test_interoperability(self.stub, None)
- def testCancelAfterBegin(self):
- methods.TestCase.CANCEL_AFTER_BEGIN.test_interoperability(self.stub, None)
+ def testCancelAfterBegin(self):
+ methods.TestCase.CANCEL_AFTER_BEGIN.test_interoperability(self.stub,
+ None)
- def testCancelAfterFirstResponse(self):
- methods.TestCase.CANCEL_AFTER_FIRST_RESPONSE.test_interoperability(self.stub, None)
+ def testCancelAfterFirstResponse(self):
+ methods.TestCase.CANCEL_AFTER_FIRST_RESPONSE.test_interoperability(
+ self.stub, None)
- def testTimeoutOnSleepingServer(self):
- methods.TestCase.TIMEOUT_ON_SLEEPING_SERVER.test_interoperability(self.stub, None)
+ def testTimeoutOnSleepingServer(self):
+ methods.TestCase.TIMEOUT_ON_SLEEPING_SERVER.test_interoperability(
+ self.stub, None)
diff --git a/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py b/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py
index 3665c69726..b28406ed3f 100644
--- a/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py
+++ b/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Secure client-server interoperability as a unit test."""
from concurrent import futures
@@ -42,24 +41,24 @@ from tests.interop import resources
_SERVER_HOST_OVERRIDE = 'foo.test.google.fr'
-class SecureIntraopTest(
- _intraop_test_case.IntraopTestCase,
- unittest.TestCase):
+class SecureIntraopTest(_intraop_test_case.IntraopTestCase, unittest.TestCase):
- def setUp(self):
- self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
- test_pb2.add_TestServiceServicer_to_server(
- methods.TestService(), self.server)
- port = self.server.add_secure_port(
- '[::]:0', grpc.ssl_server_credentials(
- [(resources.private_key(), resources.certificate_chain())]))
- self.server.start()
- self.stub = test_pb2.TestServiceStub(
- grpc.secure_channel(
- 'localhost:{}'.format(port),
- grpc.ssl_channel_credentials(resources.test_root_certificates()),
- (('grpc.ssl_target_name_override', _SERVER_HOST_OVERRIDE,),)))
+ def setUp(self):
+ self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+ test_pb2.add_TestServiceServicer_to_server(methods.TestService(),
+ self.server)
+ port = self.server.add_secure_port(
+ '[::]:0',
+ grpc.ssl_server_credentials(
+ [(resources.private_key(), resources.certificate_chain())]))
+ self.server.start()
+ self.stub = test_pb2.TestServiceStub(
+ grpc.secure_channel('localhost:{}'.format(port),
+ grpc.ssl_channel_credentials(
+ resources.test_root_certificates()), ((
+ 'grpc.ssl_target_name_override',
+ _SERVER_HOST_OVERRIDE,),)))
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/interop/client.py b/src/python/grpcio_tests/tests/interop/client.py
index afaa466254..f177896e8e 100644
--- a/src/python/grpcio_tests/tests/interop/client.py
+++ b/src/python/grpcio_tests/tests/interop/client.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""The Python implementation of the GRPC interoperability test client."""
import argparse
@@ -41,93 +40,107 @@ from tests.interop import resources
def _args():
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--server_host', help='the host to which to connect', type=str,
- default="127.0.0.1")
- parser.add_argument(
- '--server_port', help='the port to which to connect', type=int)
- parser.add_argument(
- '--test_case', help='the test case to execute', type=str,
- default="large_unary")
- parser.add_argument(
- '--use_tls', help='require a secure connection', default=False,
- type=resources.parse_bool)
- parser.add_argument(
- '--use_test_ca', help='replace platform root CAs with ca.pem',
- default=False, type=resources.parse_bool)
- parser.add_argument(
- '--server_host_override', default="foo.test.google.fr",
- help='the server host to which to claim to connect', type=str)
- parser.add_argument('--oauth_scope', help='scope for OAuth tokens', type=str)
- parser.add_argument(
- '--default_service_account',
- help='email address of the default service account', type=str)
- return parser.parse_args()
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '--server_host',
+ help='the host to which to connect',
+ type=str,
+ default="127.0.0.1")
+ parser.add_argument(
+ '--server_port', help='the port to which to connect', type=int)
+ parser.add_argument(
+ '--test_case',
+ help='the test case to execute',
+ type=str,
+ default="large_unary")
+ parser.add_argument(
+ '--use_tls',
+ help='require a secure connection',
+ default=False,
+ type=resources.parse_bool)
+ parser.add_argument(
+ '--use_test_ca',
+ help='replace platform root CAs with ca.pem',
+ default=False,
+ type=resources.parse_bool)
+ parser.add_argument(
+ '--server_host_override',
+ default="foo.test.google.fr",
+ help='the server host to which to claim to connect',
+ type=str)
+ parser.add_argument(
+ '--oauth_scope', help='scope for OAuth tokens', type=str)
+ parser.add_argument(
+ '--default_service_account',
+ help='email address of the default service account',
+ type=str)
+ return parser.parse_args()
def _application_default_credentials():
- return oauth2client_client.GoogleCredentials.get_application_default()
+ return oauth2client_client.GoogleCredentials.get_application_default()
def _stub(args):
- target = '{}:{}'.format(args.server_host, args.server_port)
- if args.test_case == 'oauth2_auth_token':
- google_credentials = _application_default_credentials()
- scoped_credentials = google_credentials.create_scoped([args.oauth_scope])
- access_token = scoped_credentials.get_access_token().access_token
- call_credentials = grpc.access_token_call_credentials(access_token)
- elif args.test_case == 'compute_engine_creds':
- google_credentials = _application_default_credentials()
- scoped_credentials = google_credentials.create_scoped([args.oauth_scope])
- # TODO(https://github.com/grpc/grpc/issues/6799): Eliminate this last
- # remaining use of the Beta API.
- call_credentials = implementations.google_call_credentials(
- scoped_credentials)
- elif args.test_case == 'jwt_token_creds':
- google_credentials = _application_default_credentials()
- # TODO(https://github.com/grpc/grpc/issues/6799): Eliminate this last
- # remaining use of the Beta API.
- call_credentials = implementations.google_call_credentials(
- google_credentials)
- else:
- call_credentials = None
- if args.use_tls:
- if args.use_test_ca:
- root_certificates = resources.test_root_certificates()
+ target = '{}:{}'.format(args.server_host, args.server_port)
+ if args.test_case == 'oauth2_auth_token':
+ google_credentials = _application_default_credentials()
+ scoped_credentials = google_credentials.create_scoped(
+ [args.oauth_scope])
+ access_token = scoped_credentials.get_access_token().access_token
+ call_credentials = grpc.access_token_call_credentials(access_token)
+ elif args.test_case == 'compute_engine_creds':
+ google_credentials = _application_default_credentials()
+ scoped_credentials = google_credentials.create_scoped(
+ [args.oauth_scope])
+ # TODO(https://github.com/grpc/grpc/issues/6799): Eliminate this last
+ # remaining use of the Beta API.
+ call_credentials = implementations.google_call_credentials(
+ scoped_credentials)
+ elif args.test_case == 'jwt_token_creds':
+ google_credentials = _application_default_credentials()
+ # TODO(https://github.com/grpc/grpc/issues/6799): Eliminate this last
+ # remaining use of the Beta API.
+ call_credentials = implementations.google_call_credentials(
+ google_credentials)
else:
- root_certificates = None # will load default roots.
-
- channel_credentials = grpc.ssl_channel_credentials(root_certificates)
- if call_credentials is not None:
- channel_credentials = grpc.composite_channel_credentials(
- channel_credentials, call_credentials)
-
- channel = grpc.secure_channel(
- target, channel_credentials,
- (('grpc.ssl_target_name_override', args.server_host_override,),))
- else:
- channel = grpc.insecure_channel(target)
- if args.test_case == "unimplemented_service":
- return test_pb2.UnimplementedServiceStub(channel)
- else:
- return test_pb2.TestServiceStub(channel)
+ call_credentials = None
+ if args.use_tls:
+ if args.use_test_ca:
+ root_certificates = resources.test_root_certificates()
+ else:
+ root_certificates = None # will load default roots.
+
+ channel_credentials = grpc.ssl_channel_credentials(root_certificates)
+ if call_credentials is not None:
+ channel_credentials = grpc.composite_channel_credentials(
+ channel_credentials, call_credentials)
+
+ channel = grpc.secure_channel(target, channel_credentials, ((
+ 'grpc.ssl_target_name_override',
+ args.server_host_override,),))
+ else:
+ channel = grpc.insecure_channel(target)
+ if args.test_case == "unimplemented_service":
+ return test_pb2.UnimplementedServiceStub(channel)
+ else:
+ return test_pb2.TestServiceStub(channel)
def _test_case_from_arg(test_case_arg):
- for test_case in methods.TestCase:
- if test_case_arg == test_case.value:
- return test_case
- else:
- raise ValueError('No test case "%s"!' % test_case_arg)
+ for test_case in methods.TestCase:
+ if test_case_arg == test_case.value:
+ return test_case
+ else:
+ raise ValueError('No test case "%s"!' % test_case_arg)
def test_interoperability():
- args = _args()
- stub = _stub(args)
- test_case = _test_case_from_arg(args.test_case)
- test_case.test_interoperability(stub, args)
+ args = _args()
+ stub = _stub(args)
+ test_case = _test_case_from_arg(args.test_case)
+ test_case.test_interoperability(stub, args)
if __name__ == '__main__':
- test_interoperability()
+ test_interoperability()
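
The credential plumbing in client.py is unchanged in substance: when a test case needs per-call credentials they are composed with the SSL channel credentials via grpc.composite_channel_credentials, and the channel is opened with the grpc.ssl_target_name_override option. A compact sketch of that composition, with a plain access token standing in for the OAuth/JWT flows the real test cases use; the function name and defaults are illustrative.

import grpc


def secure_stub_channel(target, root_certificates=None, access_token=None,
                        host_override=None):
    """Builds a secure channel, optionally attaching per-call credentials."""
    channel_credentials = grpc.ssl_channel_credentials(root_certificates)
    if access_token is not None:
        call_credentials = grpc.access_token_call_credentials(access_token)
        channel_credentials = grpc.composite_channel_credentials(
            channel_credentials, call_credentials)
    options = ()
    if host_override is not None:
        options = (('grpc.ssl_target_name_override', host_override),)
    return grpc.secure_channel(target, channel_credentials, options)


if __name__ == '__main__':
    channel = secure_stub_channel(
        'localhost:8080', host_override='foo.test.google.fr')
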
diff --git a/src/python/grpcio_tests/tests/interop/methods.py b/src/python/grpcio_tests/tests/interop/methods.py
index 9038ae5751..e1f8722168 100644
--- a/src/python/grpcio_tests/tests/interop/methods.py
+++ b/src/python/grpcio_tests/tests/interop/methods.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Implementations of interoperability test methods."""
import enum
@@ -46,463 +45,483 @@ from src.proto.grpc.testing import test_pb2
_INITIAL_METADATA_KEY = "x-grpc-test-echo-initial"
_TRAILING_METADATA_KEY = "x-grpc-test-echo-trailing-bin"
+
def _maybe_echo_metadata(servicer_context):
- """Copies metadata from request to response if it is present."""
- invocation_metadata = dict(servicer_context.invocation_metadata())
- if _INITIAL_METADATA_KEY in invocation_metadata:
- initial_metadatum = (
- _INITIAL_METADATA_KEY, invocation_metadata[_INITIAL_METADATA_KEY])
- servicer_context.send_initial_metadata((initial_metadatum,))
- if _TRAILING_METADATA_KEY in invocation_metadata:
- trailing_metadatum = (
- _TRAILING_METADATA_KEY, invocation_metadata[_TRAILING_METADATA_KEY])
- servicer_context.set_trailing_metadata((trailing_metadatum,))
+ """Copies metadata from request to response if it is present."""
+ invocation_metadata = dict(servicer_context.invocation_metadata())
+ if _INITIAL_METADATA_KEY in invocation_metadata:
+ initial_metadatum = (_INITIAL_METADATA_KEY,
+ invocation_metadata[_INITIAL_METADATA_KEY])
+ servicer_context.send_initial_metadata((initial_metadatum,))
+ if _TRAILING_METADATA_KEY in invocation_metadata:
+ trailing_metadatum = (_TRAILING_METADATA_KEY,
+ invocation_metadata[_TRAILING_METADATA_KEY])
+ servicer_context.set_trailing_metadata((trailing_metadatum,))
+
def _maybe_echo_status_and_message(request, servicer_context):
- """Sets the response context code and details if the request asks for them"""
- if request.HasField('response_status'):
- servicer_context.set_code(request.response_status.code)
- servicer_context.set_details(request.response_status.message)
+ """Sets the response context code and details if the request asks for them"""
+ if request.HasField('response_status'):
+ servicer_context.set_code(request.response_status.code)
+ servicer_context.set_details(request.response_status.message)
+
class TestService(test_pb2.TestServiceServicer):
- def EmptyCall(self, request, context):
- _maybe_echo_metadata(context)
- return empty_pb2.Empty()
+ def EmptyCall(self, request, context):
+ _maybe_echo_metadata(context)
+ return empty_pb2.Empty()
- def UnaryCall(self, request, context):
- _maybe_echo_metadata(context)
- _maybe_echo_status_and_message(request, context)
- return messages_pb2.SimpleResponse(
- payload=messages_pb2.Payload(
+ def UnaryCall(self, request, context):
+ _maybe_echo_metadata(context)
+ _maybe_echo_status_and_message(request, context)
+ return messages_pb2.SimpleResponse(payload=messages_pb2.Payload(
type=messages_pb2.COMPRESSABLE,
body=b'\x00' * request.response_size))
- def StreamingOutputCall(self, request, context):
- _maybe_echo_status_and_message(request, context)
- for response_parameters in request.response_parameters:
- yield messages_pb2.StreamingOutputCallResponse(
- payload=messages_pb2.Payload(
- type=request.response_type,
- body=b'\x00' * response_parameters.size))
-
- def StreamingInputCall(self, request_iterator, context):
- aggregate_size = 0
- for request in request_iterator:
- if request.payload is not None and request.payload.body:
- aggregate_size += len(request.payload.body)
- return messages_pb2.StreamingInputCallResponse(
- aggregated_payload_size=aggregate_size)
-
- def FullDuplexCall(self, request_iterator, context):
- _maybe_echo_metadata(context)
- for request in request_iterator:
- _maybe_echo_status_and_message(request, context)
- for response_parameters in request.response_parameters:
- yield messages_pb2.StreamingOutputCallResponse(
- payload=messages_pb2.Payload(
- type=request.payload.type,
- body=b'\x00' * response_parameters.size))
-
- # NOTE(nathaniel): Apparently this is the same as the full-duplex call?
- # NOTE(atash): It isn't even called in the interop spec (Oct 22 2015)...
- def HalfDuplexCall(self, request_iterator, context):
- return self.FullDuplexCall(request_iterator, context)
+ def StreamingOutputCall(self, request, context):
+ _maybe_echo_status_and_message(request, context)
+ for response_parameters in request.response_parameters:
+ yield messages_pb2.StreamingOutputCallResponse(
+ payload=messages_pb2.Payload(
+ type=request.response_type,
+ body=b'\x00' * response_parameters.size))
+
+ def StreamingInputCall(self, request_iterator, context):
+ aggregate_size = 0
+ for request in request_iterator:
+ if request.payload is not None and request.payload.body:
+ aggregate_size += len(request.payload.body)
+ return messages_pb2.StreamingInputCallResponse(
+ aggregated_payload_size=aggregate_size)
+
+ def FullDuplexCall(self, request_iterator, context):
+ _maybe_echo_metadata(context)
+ for request in request_iterator:
+ _maybe_echo_status_and_message(request, context)
+ for response_parameters in request.response_parameters:
+ yield messages_pb2.StreamingOutputCallResponse(
+ payload=messages_pb2.Payload(
+ type=request.payload.type,
+ body=b'\x00' * response_parameters.size))
+
+ # NOTE(nathaniel): Apparently this is the same as the full-duplex call?
+ # NOTE(atash): It isn't even called in the interop spec (Oct 22 2015)...
+ def HalfDuplexCall(self, request_iterator, context):
+ return self.FullDuplexCall(request_iterator, context)
def _expect_status_code(call, expected_code):
- if call.code() != expected_code:
- raise ValueError(
- 'expected code %s, got %s' % (expected_code, call.code()))
+ if call.code() != expected_code:
+ raise ValueError('expected code %s, got %s' %
+ (expected_code, call.code()))
def _expect_status_details(call, expected_details):
- if call.details() != expected_details:
- raise ValueError(
- 'expected message %s, got %s' % (expected_details, call.details()))
+ if call.details() != expected_details:
+ raise ValueError('expected message %s, got %s' %
+ (expected_details, call.details()))
def _validate_status_code_and_details(call, expected_code, expected_details):
- _expect_status_code(call, expected_code)
- _expect_status_details(call, expected_details)
+ _expect_status_code(call, expected_code)
+ _expect_status_details(call, expected_details)
def _validate_payload_type_and_length(response, expected_type, expected_length):
- if response.payload.type is not expected_type:
- raise ValueError(
- 'expected payload type %s, got %s' %
- (expected_type, type(response.payload.type)))
- elif len(response.payload.body) != expected_length:
- raise ValueError(
- 'expected payload body size %d, got %d' %
- (expected_length, len(response.payload.body)))
-
-
-def _large_unary_common_behavior(
- stub, fill_username, fill_oauth_scope, call_credentials):
- size = 314159
- request = messages_pb2.SimpleRequest(
- response_type=messages_pb2.COMPRESSABLE, response_size=size,
- payload=messages_pb2.Payload(body=b'\x00' * 271828),
- fill_username=fill_username, fill_oauth_scope=fill_oauth_scope)
- response_future = stub.UnaryCall.future(
- request, credentials=call_credentials)
- response = response_future.result()
- _validate_payload_type_and_length(response, messages_pb2.COMPRESSABLE, size)
- return response
+ if response.payload.type is not expected_type:
+ raise ValueError('expected payload type %s, got %s' %
+ (expected_type, type(response.payload.type)))
+ elif len(response.payload.body) != expected_length:
+ raise ValueError('expected payload body size %d, got %d' %
+ (expected_length, len(response.payload.body)))
+
+
+def _large_unary_common_behavior(stub, fill_username, fill_oauth_scope,
+ call_credentials):
+ size = 314159
+ request = messages_pb2.SimpleRequest(
+ response_type=messages_pb2.COMPRESSABLE,
+ response_size=size,
+ payload=messages_pb2.Payload(body=b'\x00' * 271828),
+ fill_username=fill_username,
+ fill_oauth_scope=fill_oauth_scope)
+ response_future = stub.UnaryCall.future(
+ request, credentials=call_credentials)
+ response = response_future.result()
+ _validate_payload_type_and_length(response, messages_pb2.COMPRESSABLE, size)
+ return response
def _empty_unary(stub):
- response = stub.EmptyCall(empty_pb2.Empty())
- if not isinstance(response, empty_pb2.Empty):
- raise TypeError(
- 'response is of type "%s", not empty_pb2.Empty!', type(response))
+ response = stub.EmptyCall(empty_pb2.Empty())
+ if not isinstance(response, empty_pb2.Empty):
+ raise TypeError('response is of type "%s", not empty_pb2.Empty!' %
+ type(response))
def _large_unary(stub):
- _large_unary_common_behavior(stub, False, False, None)
+ _large_unary_common_behavior(stub, False, False, None)
def _client_streaming(stub):
- payload_body_sizes = (27182, 8, 1828, 45904,)
- payloads = (
- messages_pb2.Payload(body=b'\x00' * size)
- for size in payload_body_sizes)
- requests = (
- messages_pb2.StreamingInputCallRequest(payload=payload)
- for payload in payloads)
- response = stub.StreamingInputCall(requests)
- if response.aggregated_payload_size != 74922:
- raise ValueError(
- 'incorrect size %d!' % response.aggregated_payload_size)
+ payload_body_sizes = (
+ 27182,
+ 8,
+ 1828,
+ 45904,)
+ payloads = (messages_pb2.Payload(body=b'\x00' * size)
+ for size in payload_body_sizes)
+ requests = (messages_pb2.StreamingInputCallRequest(payload=payload)
+ for payload in payloads)
+ response = stub.StreamingInputCall(requests)
+ if response.aggregated_payload_size != 74922:
+ raise ValueError('incorrect size %d!' %
+ response.aggregated_payload_size)
def _server_streaming(stub):
- sizes = (31415, 9, 2653, 58979,)
-
- request = messages_pb2.StreamingOutputCallRequest(
- response_type=messages_pb2.COMPRESSABLE,
- response_parameters=(
- messages_pb2.ResponseParameters(size=sizes[0]),
- messages_pb2.ResponseParameters(size=sizes[1]),
- messages_pb2.ResponseParameters(size=sizes[2]),
- messages_pb2.ResponseParameters(size=sizes[3]),
- )
- )
- response_iterator = stub.StreamingOutputCall(request)
- for index, response in enumerate(response_iterator):
- _validate_payload_type_and_length(
- response, messages_pb2.COMPRESSABLE, sizes[index])
+ sizes = (
+ 31415,
+ 9,
+ 2653,
+ 58979,)
+ request = messages_pb2.StreamingOutputCallRequest(
+ response_type=messages_pb2.COMPRESSABLE,
+ response_parameters=(
+ messages_pb2.ResponseParameters(size=sizes[0]),
+ messages_pb2.ResponseParameters(size=sizes[1]),
+ messages_pb2.ResponseParameters(size=sizes[2]),
+ messages_pb2.ResponseParameters(size=sizes[3]),))
+ response_iterator = stub.StreamingOutputCall(request)
+ for index, response in enumerate(response_iterator):
+ _validate_payload_type_and_length(response, messages_pb2.COMPRESSABLE,
+ sizes[index])
class _Pipe(object):
- def __init__(self):
- self._condition = threading.Condition()
- self._values = []
- self._open = True
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._values = []
+ self._open = True
- def __iter__(self):
- return self
+ def __iter__(self):
+ return self
- def __next__(self):
- return self.next()
+ def __next__(self):
+ return self.next()
- def next(self):
- with self._condition:
- while not self._values and self._open:
- self._condition.wait()
- if self._values:
- return self._values.pop(0)
- else:
- raise StopIteration()
+ def next(self):
+ with self._condition:
+ while not self._values and self._open:
+ self._condition.wait()
+ if self._values:
+ return self._values.pop(0)
+ else:
+ raise StopIteration()
- def add(self, value):
- with self._condition:
- self._values.append(value)
- self._condition.notify()
+ def add(self, value):
+ with self._condition:
+ self._values.append(value)
+ self._condition.notify()
- def close(self):
- with self._condition:
- self._open = False
- self._condition.notify()
+ def close(self):
+ with self._condition:
+ self._open = False
+ self._condition.notify()
- def __enter__(self):
- return self
+ def __enter__(self):
+ return self
- def __exit__(self, type, value, traceback):
- self.close()
+ def __exit__(self, type, value, traceback):
+ self.close()
def _ping_pong(stub):
- request_response_sizes = (31415, 9, 2653, 58979,)
- request_payload_sizes = (27182, 8, 1828, 45904,)
-
- with _Pipe() as pipe:
- response_iterator = stub.FullDuplexCall(pipe)
- for response_size, payload_size in zip(
- request_response_sizes, request_payload_sizes):
- request = messages_pb2.StreamingOutputCallRequest(
- response_type=messages_pb2.COMPRESSABLE,
- response_parameters=(
- messages_pb2.ResponseParameters(size=response_size),),
- payload=messages_pb2.Payload(body=b'\x00' * payload_size))
- pipe.add(request)
- response = next(response_iterator)
- _validate_payload_type_and_length(
- response, messages_pb2.COMPRESSABLE, response_size)
+ request_response_sizes = (
+ 31415,
+ 9,
+ 2653,
+ 58979,)
+ request_payload_sizes = (
+ 27182,
+ 8,
+ 1828,
+ 45904,)
+
+ with _Pipe() as pipe:
+ response_iterator = stub.FullDuplexCall(pipe)
+ for response_size, payload_size in zip(request_response_sizes,
+ request_payload_sizes):
+ request = messages_pb2.StreamingOutputCallRequest(
+ response_type=messages_pb2.COMPRESSABLE,
+ response_parameters=(
+ messages_pb2.ResponseParameters(size=response_size),),
+ payload=messages_pb2.Payload(body=b'\x00' * payload_size))
+ pipe.add(request)
+ response = next(response_iterator)
+ _validate_payload_type_and_length(
+ response, messages_pb2.COMPRESSABLE, response_size)
def _cancel_after_begin(stub):
- with _Pipe() as pipe:
- response_future = stub.StreamingInputCall.future(pipe)
- response_future.cancel()
- if not response_future.cancelled():
- raise ValueError('expected cancelled method to return True')
- if response_future.code() is not grpc.StatusCode.CANCELLED:
- raise ValueError('expected status code CANCELLED')
+ with _Pipe() as pipe:
+ response_future = stub.StreamingInputCall.future(pipe)
+ response_future.cancel()
+ if not response_future.cancelled():
+ raise ValueError('expected cancelled method to return True')
+ if response_future.code() is not grpc.StatusCode.CANCELLED:
+ raise ValueError('expected status code CANCELLED')
def _cancel_after_first_response(stub):
- request_response_sizes = (31415, 9, 2653, 58979,)
- request_payload_sizes = (27182, 8, 1828, 45904,)
- with _Pipe() as pipe:
- response_iterator = stub.FullDuplexCall(pipe)
-
- response_size = request_response_sizes[0]
- payload_size = request_payload_sizes[0]
- request = messages_pb2.StreamingOutputCallRequest(
- response_type=messages_pb2.COMPRESSABLE,
- response_parameters=(
- messages_pb2.ResponseParameters(size=response_size),),
- payload=messages_pb2.Payload(body=b'\x00' * payload_size))
- pipe.add(request)
- response = next(response_iterator)
- # We test the contents of `response` in the Ping Pong test - don't check
- # them here.
- response_iterator.cancel()
-
- try:
- next(response_iterator)
- except grpc.RpcError as rpc_error:
- if rpc_error.code() is not grpc.StatusCode.CANCELLED:
- raise
- else:
- raise ValueError('expected call to be cancelled')
+ request_response_sizes = (
+ 31415,
+ 9,
+ 2653,
+ 58979,)
+ request_payload_sizes = (
+ 27182,
+ 8,
+ 1828,
+ 45904,)
+ with _Pipe() as pipe:
+ response_iterator = stub.FullDuplexCall(pipe)
+
+ response_size = request_response_sizes[0]
+ payload_size = request_payload_sizes[0]
+ request = messages_pb2.StreamingOutputCallRequest(
+ response_type=messages_pb2.COMPRESSABLE,
+ response_parameters=(
+ messages_pb2.ResponseParameters(size=response_size),),
+ payload=messages_pb2.Payload(body=b'\x00' * payload_size))
+ pipe.add(request)
+ response = next(response_iterator)
+ # We test the contents of `response` in the Ping Pong test - don't check
+ # them here.
+ response_iterator.cancel()
+
+ try:
+ next(response_iterator)
+ except grpc.RpcError as rpc_error:
+ if rpc_error.code() is not grpc.StatusCode.CANCELLED:
+ raise
+ else:
+ raise ValueError('expected call to be cancelled')
def _timeout_on_sleeping_server(stub):
- request_payload_size = 27182
- with _Pipe() as pipe:
- response_iterator = stub.FullDuplexCall(pipe, timeout=0.001)
-
- request = messages_pb2.StreamingOutputCallRequest(
- response_type=messages_pb2.COMPRESSABLE,
- payload=messages_pb2.Payload(body=b'\x00' * request_payload_size))
- pipe.add(request)
- try:
- next(response_iterator)
- except grpc.RpcError as rpc_error:
- if rpc_error.code() is not grpc.StatusCode.DEADLINE_EXCEEDED:
- raise
- else:
- raise ValueError('expected call to exceed deadline')
+ request_payload_size = 27182
+ with _Pipe() as pipe:
+ response_iterator = stub.FullDuplexCall(pipe, timeout=0.001)
+
+ request = messages_pb2.StreamingOutputCallRequest(
+ response_type=messages_pb2.COMPRESSABLE,
+ payload=messages_pb2.Payload(body=b'\x00' * request_payload_size))
+ pipe.add(request)
+ try:
+ next(response_iterator)
+ except grpc.RpcError as rpc_error:
+ if rpc_error.code() is not grpc.StatusCode.DEADLINE_EXCEEDED:
+ raise
+ else:
+ raise ValueError('expected call to exceed deadline')
def _empty_stream(stub):
- with _Pipe() as pipe:
- response_iterator = stub.FullDuplexCall(pipe)
- pipe.close()
- try:
- next(response_iterator)
- raise ValueError('expected exactly 0 responses')
- except StopIteration:
- pass
+ with _Pipe() as pipe:
+ response_iterator = stub.FullDuplexCall(pipe)
+ pipe.close()
+ try:
+ next(response_iterator)
+ raise ValueError('expected exactly 0 responses')
+ except StopIteration:
+ pass
def _status_code_and_message(stub):
- details = 'test status message'
- code = 2
- status = grpc.StatusCode.UNKNOWN # code = 2
-
- # Test with a UnaryCall
- request = messages_pb2.SimpleRequest(
- response_type=messages_pb2.COMPRESSABLE,
- response_size=1,
- payload=messages_pb2.Payload(body=b'\x00'),
- response_status=messages_pb2.EchoStatus(code=code, message=details)
- )
- response_future = stub.UnaryCall.future(request)
- _validate_status_code_and_details(response_future, status, details)
-
- # Test with a FullDuplexCall
- with _Pipe() as pipe:
- response_iterator = stub.FullDuplexCall(pipe)
- request = messages_pb2.StreamingOutputCallRequest(
+ details = 'test status message'
+ code = 2
+ status = grpc.StatusCode.UNKNOWN # code = 2
+
+ # Test with a UnaryCall
+ request = messages_pb2.SimpleRequest(
response_type=messages_pb2.COMPRESSABLE,
- response_parameters=(
- messages_pb2.ResponseParameters(size=1),),
+ response_size=1,
payload=messages_pb2.Payload(body=b'\x00'),
- response_status=messages_pb2.EchoStatus(code=code, message=details))
- pipe.add(request) # sends the initial request.
- # Dropping out of with block closes the pipe
- _validate_status_code_and_details(response_iterator, status, details)
+ response_status=messages_pb2.EchoStatus(
+ code=code, message=details))
+ response_future = stub.UnaryCall.future(request)
+ _validate_status_code_and_details(response_future, status, details)
+
+ # Test with a FullDuplexCall
+ with _Pipe() as pipe:
+ response_iterator = stub.FullDuplexCall(pipe)
+ request = messages_pb2.StreamingOutputCallRequest(
+ response_type=messages_pb2.COMPRESSABLE,
+ response_parameters=(messages_pb2.ResponseParameters(size=1),),
+ payload=messages_pb2.Payload(body=b'\x00'),
+ response_status=messages_pb2.EchoStatus(
+ code=code, message=details))
+ pipe.add(request) # sends the initial request.
+ # Dropping out of with block closes the pipe
+ _validate_status_code_and_details(response_iterator, status, details)
def _unimplemented_method(test_service_stub):
- response_future = (
- test_service_stub.UnimplementedCall.future(empty_pb2.Empty()))
- _expect_status_code(response_future, grpc.StatusCode.UNIMPLEMENTED)
+ response_future = (
+ test_service_stub.UnimplementedCall.future(empty_pb2.Empty()))
+ _expect_status_code(response_future, grpc.StatusCode.UNIMPLEMENTED)
def _unimplemented_service(unimplemented_service_stub):
- response_future = (
- unimplemented_service_stub.UnimplementedCall.future(empty_pb2.Empty()))
- _expect_status_code(response_future, grpc.StatusCode.UNIMPLEMENTED)
+ response_future = (
+ unimplemented_service_stub.UnimplementedCall.future(empty_pb2.Empty()))
+ _expect_status_code(response_future, grpc.StatusCode.UNIMPLEMENTED)
def _custom_metadata(stub):
- initial_metadata_value = "test_initial_metadata_value"
- trailing_metadata_value = "\x0a\x0b\x0a\x0b\x0a\x0b"
- metadata = (
- (_INITIAL_METADATA_KEY, initial_metadata_value),
- (_TRAILING_METADATA_KEY, trailing_metadata_value))
-
- def _validate_metadata(response):
- initial_metadata = dict(response.initial_metadata())
- if initial_metadata[_INITIAL_METADATA_KEY] != initial_metadata_value:
- raise ValueError(
- 'expected initial metadata %s, got %s' % (
- initial_metadata_value, initial_metadata[_INITIAL_METADATA_KEY]))
- trailing_metadata = dict(response.trailing_metadata())
- if trailing_metadata[_TRAILING_METADATA_KEY] != trailing_metadata_value:
- raise ValueError(
- 'expected trailing metadata %s, got %s' % (
- trailing_metadata_value, initial_metadata[_TRAILING_METADATA_KEY]))
-
- # Testing with UnaryCall
- request = messages_pb2.SimpleRequest(
- response_type=messages_pb2.COMPRESSABLE,
- response_size=1,
- payload=messages_pb2.Payload(body=b'\x00'))
- response_future = stub.UnaryCall.future(request, metadata=metadata)
- _validate_metadata(response_future)
-
- # Testing with FullDuplexCall
- with _Pipe() as pipe:
- response_iterator = stub.FullDuplexCall(pipe, metadata=metadata)
- request = messages_pb2.StreamingOutputCallRequest(
+ initial_metadata_value = "test_initial_metadata_value"
+ trailing_metadata_value = "\x0a\x0b\x0a\x0b\x0a\x0b"
+ metadata = ((_INITIAL_METADATA_KEY, initial_metadata_value),
+ (_TRAILING_METADATA_KEY, trailing_metadata_value))
+
+ def _validate_metadata(response):
+ initial_metadata = dict(response.initial_metadata())
+ if initial_metadata[_INITIAL_METADATA_KEY] != initial_metadata_value:
+ raise ValueError('expected initial metadata %s, got %s' %
+ (initial_metadata_value,
+ initial_metadata[_INITIAL_METADATA_KEY]))
+ trailing_metadata = dict(response.trailing_metadata())
+ if trailing_metadata[_TRAILING_METADATA_KEY] != trailing_metadata_value:
+ raise ValueError('expected trailing metadata %s, got %s' %
+ (trailing_metadata_value,
+ trailing_metadata[_TRAILING_METADATA_KEY]))
+
+ # Testing with UnaryCall
+ request = messages_pb2.SimpleRequest(
response_type=messages_pb2.COMPRESSABLE,
- response_parameters=(
- messages_pb2.ResponseParameters(size=1),))
- pipe.add(request) # Sends the request
- next(response_iterator) # Causes server to send trailing metadata
- # Dropping out of the with block closes the pipe
- _validate_metadata(response_iterator)
+ response_size=1,
+ payload=messages_pb2.Payload(body=b'\x00'))
+ response_future = stub.UnaryCall.future(request, metadata=metadata)
+ _validate_metadata(response_future)
+
+ # Testing with FullDuplexCall
+ with _Pipe() as pipe:
+ response_iterator = stub.FullDuplexCall(pipe, metadata=metadata)
+ request = messages_pb2.StreamingOutputCallRequest(
+ response_type=messages_pb2.COMPRESSABLE,
+ response_parameters=(messages_pb2.ResponseParameters(size=1),))
+ pipe.add(request) # Sends the request
+ next(response_iterator) # Causes server to send trailing metadata
+ # Dropping out of the with block closes the pipe
+ _validate_metadata(response_iterator)
+
def _compute_engine_creds(stub, args):
- response = _large_unary_common_behavior(stub, True, True, None)
- if args.default_service_account != response.username:
- raise ValueError(
- 'expected username %s, got %s' % (
- args.default_service_account, response.username))
+ response = _large_unary_common_behavior(stub, True, True, None)
+ if args.default_service_account != response.username:
+ raise ValueError('expected username %s, got %s' %
+ (args.default_service_account, response.username))
def _oauth2_auth_token(stub, args):
- json_key_filename = os.environ[
- oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS]
- wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
- response = _large_unary_common_behavior(stub, True, True, None)
- if wanted_email != response.username:
- raise ValueError(
- 'expected username %s, got %s' % (wanted_email, response.username))
- if args.oauth_scope.find(response.oauth_scope) == -1:
- raise ValueError(
- 'expected to find oauth scope "{}" in received "{}"'.format(
- response.oauth_scope, args.oauth_scope))
+ json_key_filename = os.environ[
+ oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS]
+ wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
+ response = _large_unary_common_behavior(stub, True, True, None)
+ if wanted_email != response.username:
+ raise ValueError('expected username %s, got %s' %
+ (wanted_email, response.username))
+ if args.oauth_scope.find(response.oauth_scope) == -1:
+ raise ValueError('expected to find oauth scope "{}" in received "{}"'.
+ format(response.oauth_scope, args.oauth_scope))
def _jwt_token_creds(stub, args):
- json_key_filename = os.environ[
- oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS]
- wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
- response = _large_unary_common_behavior(stub, True, False, None)
- if wanted_email != response.username:
- raise ValueError(
- 'expected username %s, got %s' % (wanted_email, response.username))
+ json_key_filename = os.environ[
+ oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS]
+ wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
+ response = _large_unary_common_behavior(stub, True, False, None)
+ if wanted_email != response.username:
+ raise ValueError('expected username %s, got %s' %
+ (wanted_email, response.username))
def _per_rpc_creds(stub, args):
- json_key_filename = os.environ[
- oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS]
- wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
- credentials = oauth2client_client.GoogleCredentials.get_application_default()
- scoped_credentials = credentials.create_scoped([args.oauth_scope])
- # TODO(https://github.com/grpc/grpc/issues/6799): Eliminate this last
- # remaining use of the Beta API.
- call_credentials = implementations.google_call_credentials(
- scoped_credentials)
- response = _large_unary_common_behavior(stub, True, False, call_credentials)
- if wanted_email != response.username:
- raise ValueError(
- 'expected username %s, got %s' % (wanted_email, response.username))
+ json_key_filename = os.environ[
+ oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS]
+ wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
+ credentials = oauth2client_client.GoogleCredentials.get_application_default(
+ )
+ scoped_credentials = credentials.create_scoped([args.oauth_scope])
+ # TODO(https://github.com/grpc/grpc/issues/6799): Eliminate this last
+ # remaining use of the Beta API.
+ call_credentials = implementations.google_call_credentials(
+ scoped_credentials)
+ response = _large_unary_common_behavior(stub, True, False, call_credentials)
+ if wanted_email != response.username:
+ raise ValueError('expected username %s, got %s' %
+ (wanted_email, response.username))
@enum.unique
class TestCase(enum.Enum):
- EMPTY_UNARY = 'empty_unary'
- LARGE_UNARY = 'large_unary'
- SERVER_STREAMING = 'server_streaming'
- CLIENT_STREAMING = 'client_streaming'
- PING_PONG = 'ping_pong'
- CANCEL_AFTER_BEGIN = 'cancel_after_begin'
- CANCEL_AFTER_FIRST_RESPONSE = 'cancel_after_first_response'
- EMPTY_STREAM = 'empty_stream'
- STATUS_CODE_AND_MESSAGE = 'status_code_and_message'
- UNIMPLEMENTED_METHOD = 'unimplemented_method'
- UNIMPLEMENTED_SERVICE = 'unimplemented_service'
- CUSTOM_METADATA = "custom_metadata"
- COMPUTE_ENGINE_CREDS = 'compute_engine_creds'
- OAUTH2_AUTH_TOKEN = 'oauth2_auth_token'
- JWT_TOKEN_CREDS = 'jwt_token_creds'
- PER_RPC_CREDS = 'per_rpc_creds'
- TIMEOUT_ON_SLEEPING_SERVER = 'timeout_on_sleeping_server'
-
- def test_interoperability(self, stub, args):
- if self is TestCase.EMPTY_UNARY:
- _empty_unary(stub)
- elif self is TestCase.LARGE_UNARY:
- _large_unary(stub)
- elif self is TestCase.SERVER_STREAMING:
- _server_streaming(stub)
- elif self is TestCase.CLIENT_STREAMING:
- _client_streaming(stub)
- elif self is TestCase.PING_PONG:
- _ping_pong(stub)
- elif self is TestCase.CANCEL_AFTER_BEGIN:
- _cancel_after_begin(stub)
- elif self is TestCase.CANCEL_AFTER_FIRST_RESPONSE:
- _cancel_after_first_response(stub)
- elif self is TestCase.TIMEOUT_ON_SLEEPING_SERVER:
- _timeout_on_sleeping_server(stub)
- elif self is TestCase.EMPTY_STREAM:
- _empty_stream(stub)
- elif self is TestCase.STATUS_CODE_AND_MESSAGE:
- _status_code_and_message(stub)
- elif self is TestCase.UNIMPLEMENTED_METHOD:
- _unimplemented_method(stub)
- elif self is TestCase.UNIMPLEMENTED_SERVICE:
- _unimplemented_service(stub)
- elif self is TestCase.CUSTOM_METADATA:
- _custom_metadata(stub)
- elif self is TestCase.COMPUTE_ENGINE_CREDS:
- _compute_engine_creds(stub, args)
- elif self is TestCase.OAUTH2_AUTH_TOKEN:
- _oauth2_auth_token(stub, args)
- elif self is TestCase.JWT_TOKEN_CREDS:
- _jwt_token_creds(stub, args)
- elif self is TestCase.PER_RPC_CREDS:
- _per_rpc_creds(stub, args)
- else:
- raise NotImplementedError('Test case "%s" not implemented!' % self.name)
+ EMPTY_UNARY = 'empty_unary'
+ LARGE_UNARY = 'large_unary'
+ SERVER_STREAMING = 'server_streaming'
+ CLIENT_STREAMING = 'client_streaming'
+ PING_PONG = 'ping_pong'
+ CANCEL_AFTER_BEGIN = 'cancel_after_begin'
+ CANCEL_AFTER_FIRST_RESPONSE = 'cancel_after_first_response'
+ EMPTY_STREAM = 'empty_stream'
+ STATUS_CODE_AND_MESSAGE = 'status_code_and_message'
+ UNIMPLEMENTED_METHOD = 'unimplemented_method'
+ UNIMPLEMENTED_SERVICE = 'unimplemented_service'
+ CUSTOM_METADATA = "custom_metadata"
+ COMPUTE_ENGINE_CREDS = 'compute_engine_creds'
+ OAUTH2_AUTH_TOKEN = 'oauth2_auth_token'
+ JWT_TOKEN_CREDS = 'jwt_token_creds'
+ PER_RPC_CREDS = 'per_rpc_creds'
+ TIMEOUT_ON_SLEEPING_SERVER = 'timeout_on_sleeping_server'
+
+ def test_interoperability(self, stub, args):
+ if self is TestCase.EMPTY_UNARY:
+ _empty_unary(stub)
+ elif self is TestCase.LARGE_UNARY:
+ _large_unary(stub)
+ elif self is TestCase.SERVER_STREAMING:
+ _server_streaming(stub)
+ elif self is TestCase.CLIENT_STREAMING:
+ _client_streaming(stub)
+ elif self is TestCase.PING_PONG:
+ _ping_pong(stub)
+ elif self is TestCase.CANCEL_AFTER_BEGIN:
+ _cancel_after_begin(stub)
+ elif self is TestCase.CANCEL_AFTER_FIRST_RESPONSE:
+ _cancel_after_first_response(stub)
+ elif self is TestCase.TIMEOUT_ON_SLEEPING_SERVER:
+ _timeout_on_sleeping_server(stub)
+ elif self is TestCase.EMPTY_STREAM:
+ _empty_stream(stub)
+ elif self is TestCase.STATUS_CODE_AND_MESSAGE:
+ _status_code_and_message(stub)
+ elif self is TestCase.UNIMPLEMENTED_METHOD:
+ _unimplemented_method(stub)
+ elif self is TestCase.UNIMPLEMENTED_SERVICE:
+ _unimplemented_service(stub)
+ elif self is TestCase.CUSTOM_METADATA:
+ _custom_metadata(stub)
+ elif self is TestCase.COMPUTE_ENGINE_CREDS:
+ _compute_engine_creds(stub, args)
+ elif self is TestCase.OAUTH2_AUTH_TOKEN:
+ _oauth2_auth_token(stub, args)
+ elif self is TestCase.JWT_TOKEN_CREDS:
+ _jwt_token_creds(stub, args)
+ elif self is TestCase.PER_RPC_CREDS:
+ _per_rpc_creds(stub, args)
+ else:
+ raise NotImplementedError('Test case "%s" not implemented!' %
+ self.name)
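For orientation, a hedged sketch of how the TestCase dispatch above is driven; the channel target, the import paths, and the generated TestServiceStub class are assumptions for illustration and are not part of this diff.

# Illustrative only. Assumes a running interop server and that test_pb2
# exposes TestServiceStub; 'localhost:8080' and the import paths are
# placeholders.
import grpc
from src.proto.grpc.testing import test_pb2  # assumed generated module path
from tests.interop import methods  # assumed import path

channel = grpc.insecure_channel('localhost:8080')
stub = test_pb2.TestServiceStub(channel)
# Credential-less cases ignore the args parameter, so None is acceptable here.
methods.TestCase.EMPTY_UNARY.test_interoperability(stub, None)
methods.TestCase.PING_PONG.test_interoperability(stub, None)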
diff --git a/src/python/grpcio_tests/tests/interop/resources.py b/src/python/grpcio_tests/tests/interop/resources.py
index c424385cf6..2ec2eb92b4 100644
--- a/src/python/grpcio_tests/tests/interop/resources.py
+++ b/src/python/grpcio_tests/tests/interop/resources.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Constants and functions for data used in interoperability testing."""
import argparse
@@ -40,22 +39,22 @@ _CERTIFICATE_CHAIN_RESOURCE_PATH = 'credentials/server1.pem'
def test_root_certificates():
- return pkg_resources.resource_string(
- __name__, _ROOT_CERTIFICATES_RESOURCE_PATH)
+ return pkg_resources.resource_string(__name__,
+ _ROOT_CERTIFICATES_RESOURCE_PATH)
def private_key():
- return pkg_resources.resource_string(__name__, _PRIVATE_KEY_RESOURCE_PATH)
+ return pkg_resources.resource_string(__name__, _PRIVATE_KEY_RESOURCE_PATH)
def certificate_chain():
- return pkg_resources.resource_string(
- __name__, _CERTIFICATE_CHAIN_RESOURCE_PATH)
+ return pkg_resources.resource_string(__name__,
+ _CERTIFICATE_CHAIN_RESOURCE_PATH)
def parse_bool(value):
- if value == 'true':
- return True
- if value == 'false':
- return False
- raise argparse.ArgumentTypeError('Only true/false allowed')
+ if value == 'true':
+ return True
+ if value == 'false':
+ return False
+ raise argparse.ArgumentTypeError('Only true/false allowed')
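A small, self-contained sketch of how parse_bool above serves as an argparse type= converter, mirroring the --use_tls flag in server.py further down; everything here is illustrative.

# Illustrative only; mirrors resources.parse_bool and the --use_tls flag below.
import argparse

def parse_bool(value):
    if value == 'true':
        return True
    if value == 'false':
        return False
    raise argparse.ArgumentTypeError('Only true/false allowed')

parser = argparse.ArgumentParser()
parser.add_argument('--use_tls', default=False, type=parse_bool)
print(parser.parse_args(['--use_tls', 'true']).use_tls)  # True
print(parser.parse_args([]).use_tls)                      # False (default)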
diff --git a/src/python/grpcio_tests/tests/interop/server.py b/src/python/grpcio_tests/tests/interop/server.py
index 1ae83bc57d..65f1604eb8 100644
--- a/src/python/grpcio_tests/tests/interop/server.py
+++ b/src/python/grpcio_tests/tests/interop/server.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""The Python implementation of the GRPC interoperability test server."""
import argparse
@@ -44,34 +43,36 @@ _ONE_DAY_IN_SECONDS = 60 * 60 * 24
def serve():
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--port', help='the port on which to serve', type=int)
- parser.add_argument(
- '--use_tls', help='require a secure connection',
- default=False, type=resources.parse_bool)
- args = parser.parse_args()
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--port', help='the port on which to serve', type=int)
+ parser.add_argument(
+ '--use_tls',
+ help='require a secure connection',
+ default=False,
+ type=resources.parse_bool)
+ args = parser.parse_args()
+
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+ test_pb2.add_TestServiceServicer_to_server(methods.TestService(), server)
+ if args.use_tls:
+ private_key = resources.private_key()
+ certificate_chain = resources.certificate_chain()
+ credentials = grpc.ssl_server_credentials((
+ (private_key, certificate_chain),))
+ server.add_secure_port('[::]:{}'.format(args.port), credentials)
+ else:
+ server.add_insecure_port('[::]:{}'.format(args.port))
- server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
- test_pb2.add_TestServiceServicer_to_server(methods.TestService(), server)
- if args.use_tls:
- private_key = resources.private_key()
- certificate_chain = resources.certificate_chain()
- credentials = grpc.ssl_server_credentials(
- ((private_key, certificate_chain),))
- server.add_secure_port('[::]:{}'.format(args.port), credentials)
- else:
- server.add_insecure_port('[::]:{}'.format(args.port))
+ server.start()
+ logging.info('Server serving.')
+ try:
+ while True:
+ time.sleep(_ONE_DAY_IN_SECONDS)
+ except BaseException as e:
+ logging.info('Caught exception "%s"; stopping server...', e)
+ server.stop(None)
+ logging.info('Server stopped; exiting.')
- server.start()
- logging.info('Server serving.')
- try:
- while True:
- time.sleep(_ONE_DAY_IN_SECONDS)
- except BaseException as e:
- logging.info('Caught exception "%s"; stopping server...', e)
- server.stop(None)
- logging.info('Server stopped; exiting.')
if __name__ == '__main__':
- serve()
+ serve()
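A hedged, standalone sketch of the secure-port branch of serve() above; the certificate file paths are placeholders rather than the resources.py bundle the real server loads, and no servicer is registered in this minimal version.

# Illustrative only; certificate paths are placeholders, not the credentials
# shipped with resources.py. No servicer is registered in this sketch.
from concurrent import futures
import grpc

server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
private_key = open('server1.key', 'rb').read()        # placeholder path
certificate_chain = open('server1.pem', 'rb').read()  # placeholder path
credentials = grpc.ssl_server_credentials(((private_key, certificate_chain),))
port = server.add_secure_port('[::]:0', credentials)  # port 0 picks a free port
server.start()
print('serving securely on port', port)
server.stop(None)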
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/__init__.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
index 7ca2bcff38..ae5da2c3db 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
@@ -58,436 +58,440 @@ ADD_SERVICER_TO_SERVER_IDENTIFIER = 'add_TestServiceServicer_to_server'
class _ServicerMethods(object):
- def __init__(self):
- self._condition = threading.Condition()
- self._paused = False
- self._fail = False
-
- @contextlib.contextmanager
- def pause(self): # pylint: disable=invalid-name
- with self._condition:
- self._paused = True
- yield
- with self._condition:
- self._paused = False
- self._condition.notify_all()
-
- @contextlib.contextmanager
- def fail(self): # pylint: disable=invalid-name
- with self._condition:
- self._fail = True
- yield
- with self._condition:
- self._fail = False
-
- def _control(self): # pylint: disable=invalid-name
- with self._condition:
- if self._fail:
- raise ValueError()
- while self._paused:
- self._condition.wait()
-
- def UnaryCall(self, request, unused_rpc_context):
- response = response_pb2.SimpleResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
- response.payload.payload_compressable = 'a' * request.response_size
- self._control()
- return response
-
- def StreamingOutputCall(self, request, unused_rpc_context):
- for parameter in request.response_parameters:
- response = response_pb2.StreamingOutputCallResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
- response.payload.payload_compressable = 'a' * parameter.size
- self._control()
- yield response
-
- def StreamingInputCall(self, request_iter, unused_rpc_context):
- response = response_pb2.StreamingInputCallResponse()
- aggregated_payload_size = 0
- for request in request_iter:
- aggregated_payload_size += len(request.payload.payload_compressable)
- response.aggregated_payload_size = aggregated_payload_size
- self._control()
- return response
-
- def FullDuplexCall(self, request_iter, unused_rpc_context):
- for request in request_iter:
- for parameter in request.response_parameters:
- response = response_pb2.StreamingOutputCallResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
- response.payload.payload_compressable = 'a' * parameter.size
- self._control()
- yield response
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._paused = False
+ self._fail = False
+
+ @contextlib.contextmanager
+ def pause(self): # pylint: disable=invalid-name
+ with self._condition:
+ self._paused = True
+ yield
+ with self._condition:
+ self._paused = False
+ self._condition.notify_all()
- def HalfDuplexCall(self, request_iter, unused_rpc_context):
- responses = []
- for request in request_iter:
- for parameter in request.response_parameters:
- response = response_pb2.StreamingOutputCallResponse()
+ @contextlib.contextmanager
+ def fail(self): # pylint: disable=invalid-name
+ with self._condition:
+ self._fail = True
+ yield
+ with self._condition:
+ self._fail = False
+
+ def _control(self): # pylint: disable=invalid-name
+ with self._condition:
+ if self._fail:
+ raise ValueError()
+ while self._paused:
+ self._condition.wait()
+
+ def UnaryCall(self, request, unused_rpc_context):
+ response = response_pb2.SimpleResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
- response.payload.payload_compressable = 'a' * parameter.size
+ response.payload.payload_compressable = 'a' * request.response_size
+ self._control()
+ return response
+
+ def StreamingOutputCall(self, request, unused_rpc_context):
+ for parameter in request.response_parameters:
+ response = response_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response.payload.payload_compressable = 'a' * parameter.size
+ self._control()
+ yield response
+
+ def StreamingInputCall(self, request_iter, unused_rpc_context):
+ response = response_pb2.StreamingInputCallResponse()
+ aggregated_payload_size = 0
+ for request in request_iter:
+ aggregated_payload_size += len(request.payload.payload_compressable)
+ response.aggregated_payload_size = aggregated_payload_size
self._control()
- responses.append(response)
- for response in responses:
- yield response
+ return response
+
+ def FullDuplexCall(self, request_iter, unused_rpc_context):
+ for request in request_iter:
+ for parameter in request.response_parameters:
+ response = response_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response.payload.payload_compressable = 'a' * parameter.size
+ self._control()
+ yield response
+
+ def HalfDuplexCall(self, request_iter, unused_rpc_context):
+ responses = []
+ for request in request_iter:
+ for parameter in request.response_parameters:
+ response = response_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response.payload.payload_compressable = 'a' * parameter.size
+ self._control()
+ responses.append(response)
+ for response in responses:
+ yield response
class _Service(
- collections.namedtuple(
- '_Service', ('servicer_methods', 'server', 'stub',))):
- """A live and running service.
+ collections.namedtuple('_Service', (
+ 'servicer_methods',
+ 'server',
+ 'stub',))):
+ """A live and running service.
Attributes:
servicer_methods: The _ServicerMethods servicing RPCs.
server: The grpc.Server servicing RPCs.
stub: A stub on which to invoke RPCs.
"""
-
+
def _CreateService():
- """Provides a servicer backend and a stub.
+ """Provides a servicer backend and a stub.
Returns:
A _Service with which to test RPCs.
"""
- servicer_methods = _ServicerMethods()
+ servicer_methods = _ServicerMethods()
- class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
+ class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
- def UnaryCall(self, request, context):
- return servicer_methods.UnaryCall(request, context)
+ def UnaryCall(self, request, context):
+ return servicer_methods.UnaryCall(request, context)
- def StreamingOutputCall(self, request, context):
- return servicer_methods.StreamingOutputCall(request, context)
+ def StreamingOutputCall(self, request, context):
+ return servicer_methods.StreamingOutputCall(request, context)
- def StreamingInputCall(self, request_iter, context):
- return servicer_methods.StreamingInputCall(request_iter, context)
+ def StreamingInputCall(self, request_iter, context):
+ return servicer_methods.StreamingInputCall(request_iter, context)
- def FullDuplexCall(self, request_iter, context):
- return servicer_methods.FullDuplexCall(request_iter, context)
+ def FullDuplexCall(self, request_iter, context):
+ return servicer_methods.FullDuplexCall(request_iter, context)
- def HalfDuplexCall(self, request_iter, context):
- return servicer_methods.HalfDuplexCall(request_iter, context)
+ def HalfDuplexCall(self, request_iter, context):
+ return servicer_methods.HalfDuplexCall(request_iter, context)
- server = grpc.server(
- futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
- getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
- port = server.add_insecure_port('[::]:0')
- server.start()
- channel = grpc.insecure_channel('localhost:{}'.format(port))
- stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
- return _Service(servicer_methods, server, stub)
+ server = grpc.server(
+ futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
+ getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
+ stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
+ return _Service(servicer_methods, server, stub)
def _CreateIncompleteService():
- """Provides a servicer backend that fails to implement methods and its stub.
+ """Provides a servicer backend that fails to implement methods and its stub.
Returns:
A _Service with which to test RPCs. The returned _Service's
servicer_methods implements none of the methods required of it.
"""
- class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
- pass
+ class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
+ pass
- server = grpc.server(
- futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
- getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
- port = server.add_insecure_port('[::]:0')
- server.start()
- channel = grpc.insecure_channel('localhost:{}'.format(port))
- stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
- return _Service(None, server, stub)
+ server = grpc.server(
+ futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
+ getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
+ stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
+ return _Service(None, server, stub)
def _streaming_input_request_iterator():
- for _ in range(3):
- request = request_pb2.StreamingInputCallRequest()
- request.payload.payload_type = payload_pb2.COMPRESSABLE
- request.payload.payload_compressable = 'a'
- yield request
+ for _ in range(3):
+ request = request_pb2.StreamingInputCallRequest()
+ request.payload.payload_type = payload_pb2.COMPRESSABLE
+ request.payload.payload_compressable = 'a'
+ yield request
def _streaming_output_request():
- request = request_pb2.StreamingOutputCallRequest()
- sizes = [1, 2, 3]
- request.response_parameters.add(size=sizes[0], interval_us=0)
- request.response_parameters.add(size=sizes[1], interval_us=0)
- request.response_parameters.add(size=sizes[2], interval_us=0)
- return request
+ request = request_pb2.StreamingOutputCallRequest()
+ sizes = [1, 2, 3]
+ request.response_parameters.add(size=sizes[0], interval_us=0)
+ request.response_parameters.add(size=sizes[1], interval_us=0)
+ request.response_parameters.add(size=sizes[2], interval_us=0)
+ return request
def _full_duplex_request_iterator():
- request = request_pb2.StreamingOutputCallRequest()
- request.response_parameters.add(size=1, interval_us=0)
- yield request
- request = request_pb2.StreamingOutputCallRequest()
- request.response_parameters.add(size=2, interval_us=0)
- request.response_parameters.add(size=3, interval_us=0)
- yield request
+ request = request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=1, interval_us=0)
+ yield request
+ request = request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=2, interval_us=0)
+ request.response_parameters.add(size=3, interval_us=0)
+ yield request
class PythonPluginTest(unittest.TestCase):
- """Test case for the gRPC Python protoc-plugin.
+ """Test case for the gRPC Python protoc-plugin.
While reading these tests, remember that the futures API
(`stub.method.future()`) only gives futures for the *response-unary*
methods and does not exist for response-streaming methods.
"""
- def testImportAttributes(self):
- # check that we can access the generated module and its members.
- self.assertIsNotNone(
- getattr(service_pb2, STUB_IDENTIFIER, None))
- self.assertIsNotNone(
- getattr(service_pb2, SERVICER_IDENTIFIER, None))
- self.assertIsNotNone(
- getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))
-
- def testUpDown(self):
- service = _CreateService()
- self.assertIsNotNone(service.servicer_methods)
- self.assertIsNotNone(service.server)
- self.assertIsNotNone(service.stub)
-
- def testIncompleteServicer(self):
- service = _CreateIncompleteService()
- request = request_pb2.SimpleRequest(response_size=13)
- with self.assertRaises(grpc.RpcError) as exception_context:
- service.stub.UnaryCall(request)
- self.assertIs(
- exception_context.exception.code(), grpc.StatusCode.UNIMPLEMENTED)
-
- def testUnaryCall(self):
- service = _CreateService()
- request = request_pb2.SimpleRequest(response_size=13)
- response = service.stub.UnaryCall(request)
- expected_response = service.servicer_methods.UnaryCall(
- request, 'not a real context!')
- self.assertEqual(expected_response, response)
-
- def testUnaryCallFuture(self):
- service = _CreateService()
- request = request_pb2.SimpleRequest(response_size=13)
- # Check that the call does not block waiting for the server to respond.
- with service.servicer_methods.pause():
- response_future = service.stub.UnaryCall.future(request)
- response = response_future.result()
- expected_response = service.servicer_methods.UnaryCall(
- request, 'not a real RpcContext!')
- self.assertEqual(expected_response, response)
-
- def testUnaryCallFutureExpired(self):
- service = _CreateService()
- request = request_pb2.SimpleRequest(response_size=13)
- with service.servicer_methods.pause():
- response_future = service.stub.UnaryCall.future(
- request, timeout=test_constants.SHORT_TIMEOUT)
- with self.assertRaises(grpc.RpcError) as exception_context:
- response_future.result()
- self.assertIs(
- exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
- self.assertIs(response_future.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
-
- def testUnaryCallFutureCancelled(self):
- service = _CreateService()
- request = request_pb2.SimpleRequest(response_size=13)
- with service.servicer_methods.pause():
- response_future = service.stub.UnaryCall.future(request)
- response_future.cancel()
- self.assertTrue(response_future.cancelled())
- self.assertIs(response_future.code(), grpc.StatusCode.CANCELLED)
-
- def testUnaryCallFutureFailed(self):
- service = _CreateService()
- request = request_pb2.SimpleRequest(response_size=13)
- with service.servicer_methods.fail():
- response_future = service.stub.UnaryCall.future(request)
- self.assertIsNotNone(response_future.exception())
- self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
-
- def testStreamingOutputCall(self):
- service = _CreateService()
- request = _streaming_output_request()
- responses = service.stub.StreamingOutputCall(request)
- expected_responses = service.servicer_methods.StreamingOutputCall(
- request, 'not a real RpcContext!')
- for expected_response, response in moves.zip_longest(
- expected_responses, responses):
- self.assertEqual(expected_response, response)
-
- def testStreamingOutputCallExpired(self):
- service = _CreateService()
- request = _streaming_output_request()
- with service.servicer_methods.pause():
- responses = service.stub.StreamingOutputCall(
- request, timeout=test_constants.SHORT_TIMEOUT)
- with self.assertRaises(grpc.RpcError) as exception_context:
- list(responses)
- self.assertIs(
- exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
-
- def testStreamingOutputCallCancelled(self):
- service = _CreateService()
- request = _streaming_output_request()
- responses = service.stub.StreamingOutputCall(request)
- next(responses)
- responses.cancel()
- with self.assertRaises(grpc.RpcError) as exception_context:
- next(responses)
- self.assertIs(responses.code(), grpc.StatusCode.CANCELLED)
-
- def testStreamingOutputCallFailed(self):
- service = _CreateService()
- request = _streaming_output_request()
- with service.servicer_methods.fail():
- responses = service.stub.StreamingOutputCall(request)
- self.assertIsNotNone(responses)
- with self.assertRaises(grpc.RpcError) as exception_context:
- next(responses)
- self.assertIs(exception_context.exception.code(), grpc.StatusCode.UNKNOWN)
-
- def testStreamingInputCall(self):
- service = _CreateService()
- response = service.stub.StreamingInputCall(
- _streaming_input_request_iterator())
- expected_response = service.servicer_methods.StreamingInputCall(
- _streaming_input_request_iterator(),
- 'not a real RpcContext!')
- self.assertEqual(expected_response, response)
-
- def testStreamingInputCallFuture(self):
- service = _CreateService()
- with service.servicer_methods.pause():
- response_future = service.stub.StreamingInputCall.future(
- _streaming_input_request_iterator())
- response = response_future.result()
- expected_response = service.servicer_methods.StreamingInputCall(
- _streaming_input_request_iterator(),
- 'not a real RpcContext!')
- self.assertEqual(expected_response, response)
-
- def testStreamingInputCallFutureExpired(self):
- service = _CreateService()
- with service.servicer_methods.pause():
- response_future = service.stub.StreamingInputCall.future(
- _streaming_input_request_iterator(),
- timeout=test_constants.SHORT_TIMEOUT)
- with self.assertRaises(grpc.RpcError) as exception_context:
- response_future.result()
- self.assertIsInstance(response_future.exception(), grpc.RpcError)
- self.assertIs(
- response_future.exception().code(), grpc.StatusCode.DEADLINE_EXCEEDED)
- self.assertIs(
- exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
-
- def testStreamingInputCallFutureCancelled(self):
- service = _CreateService()
- with service.servicer_methods.pause():
- response_future = service.stub.StreamingInputCall.future(
- _streaming_input_request_iterator())
- response_future.cancel()
- self.assertTrue(response_future.cancelled())
- with self.assertRaises(grpc.FutureCancelledError):
- response_future.result()
-
- def testStreamingInputCallFutureFailed(self):
- service = _CreateService()
- with service.servicer_methods.fail():
- response_future = service.stub.StreamingInputCall.future(
- _streaming_input_request_iterator())
- self.assertIsNotNone(response_future.exception())
- self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
-
- def testFullDuplexCall(self):
- service = _CreateService()
- responses = service.stub.FullDuplexCall(
- _full_duplex_request_iterator())
- expected_responses = service.servicer_methods.FullDuplexCall(
- _full_duplex_request_iterator(),
- 'not a real RpcContext!')
- for expected_response, response in moves.zip_longest(
- expected_responses, responses):
- self.assertEqual(expected_response, response)
-
- def testFullDuplexCallExpired(self):
- request_iterator = _full_duplex_request_iterator()
- service = _CreateService()
- with service.servicer_methods.pause():
- responses = service.stub.FullDuplexCall(
- request_iterator, timeout=test_constants.SHORT_TIMEOUT)
- with self.assertRaises(grpc.RpcError) as exception_context:
- list(responses)
- self.assertIs(
- exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
-
- def testFullDuplexCallCancelled(self):
- service = _CreateService()
- request_iterator = _full_duplex_request_iterator()
- responses = service.stub.FullDuplexCall(request_iterator)
- next(responses)
- responses.cancel()
- with self.assertRaises(grpc.RpcError) as exception_context:
- next(responses)
- self.assertIs(
- exception_context.exception.code(), grpc.StatusCode.CANCELLED)
-
- def testFullDuplexCallFailed(self):
- request_iterator = _full_duplex_request_iterator()
- service = _CreateService()
- with service.servicer_methods.fail():
- responses = service.stub.FullDuplexCall(request_iterator)
- with self.assertRaises(grpc.RpcError) as exception_context:
+ def testImportAttributes(self):
+ # check that we can access the generated module and its members.
+ self.assertIsNotNone(getattr(service_pb2, STUB_IDENTIFIER, None))
+ self.assertIsNotNone(getattr(service_pb2, SERVICER_IDENTIFIER, None))
+ self.assertIsNotNone(
+ getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))
+
+ def testUpDown(self):
+ service = _CreateService()
+ self.assertIsNotNone(service.servicer_methods)
+ self.assertIsNotNone(service.server)
+ self.assertIsNotNone(service.stub)
+
+ def testIncompleteServicer(self):
+ service = _CreateIncompleteService()
+ request = request_pb2.SimpleRequest(response_size=13)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ service.stub.UnaryCall(request)
+ self.assertIs(exception_context.exception.code(),
+ grpc.StatusCode.UNIMPLEMENTED)
+
+ def testUnaryCall(self):
+ service = _CreateService()
+ request = request_pb2.SimpleRequest(response_size=13)
+ response = service.stub.UnaryCall(request)
+ expected_response = service.servicer_methods.UnaryCall(
+ request, 'not a real context!')
+ self.assertEqual(expected_response, response)
+
+ def testUnaryCallFuture(self):
+ service = _CreateService()
+ request = request_pb2.SimpleRequest(response_size=13)
+ # Check that the call does not block waiting for the server to respond.
+ with service.servicer_methods.pause():
+ response_future = service.stub.UnaryCall.future(request)
+ response = response_future.result()
+ expected_response = service.servicer_methods.UnaryCall(
+ request, 'not a real RpcContext!')
+ self.assertEqual(expected_response, response)
+
+ def testUnaryCallFutureExpired(self):
+ service = _CreateService()
+ request = request_pb2.SimpleRequest(response_size=13)
+ with service.servicer_methods.pause():
+ response_future = service.stub.UnaryCall.future(
+ request, timeout=test_constants.SHORT_TIMEOUT)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertIs(exception_context.exception.code(),
+ grpc.StatusCode.DEADLINE_EXCEEDED)
+ self.assertIs(response_future.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
+
+ def testUnaryCallFutureCancelled(self):
+ service = _CreateService()
+ request = request_pb2.SimpleRequest(response_size=13)
+ with service.servicer_methods.pause():
+ response_future = service.stub.UnaryCall.future(request)
+ response_future.cancel()
+ self.assertTrue(response_future.cancelled())
+ self.assertIs(response_future.code(), grpc.StatusCode.CANCELLED)
+
+ def testUnaryCallFutureFailed(self):
+ service = _CreateService()
+ request = request_pb2.SimpleRequest(response_size=13)
+ with service.servicer_methods.fail():
+ response_future = service.stub.UnaryCall.future(request)
+ self.assertIsNotNone(response_future.exception())
+ self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
+
+ def testStreamingOutputCall(self):
+ service = _CreateService()
+ request = _streaming_output_request()
+ responses = service.stub.StreamingOutputCall(request)
+ expected_responses = service.servicer_methods.StreamingOutputCall(
+ request, 'not a real RpcContext!')
+ for expected_response, response in moves.zip_longest(expected_responses,
+ responses):
+ self.assertEqual(expected_response, response)
+
+ def testStreamingOutputCallExpired(self):
+ service = _CreateService()
+ request = _streaming_output_request()
+ with service.servicer_methods.pause():
+ responses = service.stub.StreamingOutputCall(
+ request, timeout=test_constants.SHORT_TIMEOUT)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ list(responses)
+ self.assertIs(exception_context.exception.code(),
+ grpc.StatusCode.DEADLINE_EXCEEDED)
+
+ def testStreamingOutputCallCancelled(self):
+ service = _CreateService()
+ request = _streaming_output_request()
+ responses = service.stub.StreamingOutputCall(request)
next(responses)
- self.assertIs(exception_context.exception.code(), grpc.StatusCode.UNKNOWN)
-
- def testHalfDuplexCall(self):
- service = _CreateService()
- def half_duplex_request_iterator():
- request = request_pb2.StreamingOutputCallRequest()
- request.response_parameters.add(size=1, interval_us=0)
- yield request
- request = request_pb2.StreamingOutputCallRequest()
- request.response_parameters.add(size=2, interval_us=0)
- request.response_parameters.add(size=3, interval_us=0)
- yield request
- responses = service.stub.HalfDuplexCall(half_duplex_request_iterator())
- expected_responses = service.servicer_methods.HalfDuplexCall(
- half_duplex_request_iterator(), 'not a real RpcContext!')
- for expected_response, response in moves.zip_longest(
- expected_responses, responses):
- self.assertEqual(expected_response, response)
-
- def testHalfDuplexCallWedged(self):
- condition = threading.Condition()
- wait_cell = [False]
- @contextlib.contextmanager
- def wait(): # pylint: disable=invalid-name
- # Where's Python 3's 'nonlocal' statement when you need it?
- with condition:
- wait_cell[0] = True
- yield
- with condition:
- wait_cell[0] = False
- condition.notify_all()
- def half_duplex_request_iterator():
- request = request_pb2.StreamingOutputCallRequest()
- request.response_parameters.add(size=1, interval_us=0)
- yield request
- with condition:
- while wait_cell[0]:
- condition.wait()
- service = _CreateService()
- with wait():
- responses = service.stub.HalfDuplexCall(
- half_duplex_request_iterator(), timeout=test_constants.SHORT_TIMEOUT)
- # half-duplex waits for the client to send all info
- with self.assertRaises(grpc.RpcError) as exception_context:
+ responses.cancel()
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(responses)
+ self.assertIs(responses.code(), grpc.StatusCode.CANCELLED)
+
+ def testStreamingOutputCallFailed(self):
+ service = _CreateService()
+ request = _streaming_output_request()
+ with service.servicer_methods.fail():
+ responses = service.stub.StreamingOutputCall(request)
+ self.assertIsNotNone(responses)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(responses)
+ self.assertIs(exception_context.exception.code(),
+ grpc.StatusCode.UNKNOWN)
+
+ def testStreamingInputCall(self):
+ service = _CreateService()
+ response = service.stub.StreamingInputCall(
+ _streaming_input_request_iterator())
+ expected_response = service.servicer_methods.StreamingInputCall(
+ _streaming_input_request_iterator(), 'not a real RpcContext!')
+ self.assertEqual(expected_response, response)
+
+ def testStreamingInputCallFuture(self):
+ service = _CreateService()
+ with service.servicer_methods.pause():
+ response_future = service.stub.StreamingInputCall.future(
+ _streaming_input_request_iterator())
+ response = response_future.result()
+ expected_response = service.servicer_methods.StreamingInputCall(
+ _streaming_input_request_iterator(), 'not a real RpcContext!')
+ self.assertEqual(expected_response, response)
+
+ def testStreamingInputCallFutureExpired(self):
+ service = _CreateService()
+ with service.servicer_methods.pause():
+ response_future = service.stub.StreamingInputCall.future(
+ _streaming_input_request_iterator(),
+ timeout=test_constants.SHORT_TIMEOUT)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIs(response_future.exception().code(),
+ grpc.StatusCode.DEADLINE_EXCEEDED)
+ self.assertIs(exception_context.exception.code(),
+ grpc.StatusCode.DEADLINE_EXCEEDED)
+
+ def testStreamingInputCallFutureCancelled(self):
+ service = _CreateService()
+ with service.servicer_methods.pause():
+ response_future = service.stub.StreamingInputCall.future(
+ _streaming_input_request_iterator())
+ response_future.cancel()
+ self.assertTrue(response_future.cancelled())
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.result()
+
+ def testStreamingInputCallFutureFailed(self):
+ service = _CreateService()
+ with service.servicer_methods.fail():
+ response_future = service.stub.StreamingInputCall.future(
+ _streaming_input_request_iterator())
+ self.assertIsNotNone(response_future.exception())
+ self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
+
+ def testFullDuplexCall(self):
+ service = _CreateService()
+ responses = service.stub.FullDuplexCall(_full_duplex_request_iterator())
+ expected_responses = service.servicer_methods.FullDuplexCall(
+ _full_duplex_request_iterator(), 'not a real RpcContext!')
+ for expected_response, response in moves.zip_longest(expected_responses,
+ responses):
+ self.assertEqual(expected_response, response)
+
+ def testFullDuplexCallExpired(self):
+ request_iterator = _full_duplex_request_iterator()
+ service = _CreateService()
+ with service.servicer_methods.pause():
+ responses = service.stub.FullDuplexCall(
+ request_iterator, timeout=test_constants.SHORT_TIMEOUT)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ list(responses)
+ self.assertIs(exception_context.exception.code(),
+ grpc.StatusCode.DEADLINE_EXCEEDED)
+
+ def testFullDuplexCallCancelled(self):
+ service = _CreateService()
+ request_iterator = _full_duplex_request_iterator()
+ responses = service.stub.FullDuplexCall(request_iterator)
next(responses)
- self.assertIs(
- exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
+ responses.cancel()
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(responses)
+ self.assertIs(exception_context.exception.code(),
+ grpc.StatusCode.CANCELLED)
+
+ def testFullDuplexCallFailed(self):
+ request_iterator = _full_duplex_request_iterator()
+ service = _CreateService()
+ with service.servicer_methods.fail():
+ responses = service.stub.FullDuplexCall(request_iterator)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(responses)
+ self.assertIs(exception_context.exception.code(),
+ grpc.StatusCode.UNKNOWN)
+
+ def testHalfDuplexCall(self):
+ service = _CreateService()
+
+ def half_duplex_request_iterator():
+ request = request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=1, interval_us=0)
+ yield request
+ request = request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=2, interval_us=0)
+ request.response_parameters.add(size=3, interval_us=0)
+ yield request
+
+ responses = service.stub.HalfDuplexCall(half_duplex_request_iterator())
+ expected_responses = service.servicer_methods.HalfDuplexCall(
+ half_duplex_request_iterator(), 'not a real RpcContext!')
+ for expected_response, response in moves.zip_longest(expected_responses,
+ responses):
+ self.assertEqual(expected_response, response)
+
+ def testHalfDuplexCallWedged(self):
+ condition = threading.Condition()
+ wait_cell = [False]
+
+ @contextlib.contextmanager
+ def wait(): # pylint: disable=invalid-name
+ # Where's Python 3's 'nonlocal' statement when you need it?
+ with condition:
+ wait_cell[0] = True
+ yield
+ with condition:
+ wait_cell[0] = False
+ condition.notify_all()
+
+ def half_duplex_request_iterator():
+ request = request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=1, interval_us=0)
+ yield request
+ with condition:
+ while wait_cell[0]:
+ condition.wait()
+
+ service = _CreateService()
+ with wait():
+ responses = service.stub.HalfDuplexCall(
+ half_duplex_request_iterator(),
+ timeout=test_constants.SHORT_TIMEOUT)
+ # half-duplex waits for the client to send all info
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(responses)
+ self.assertIs(exception_context.exception.code(),
+ grpc.StatusCode.DEADLINE_EXCEEDED)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
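The pause()/fail() machinery reformatted above boils down to a condition-variable gate; below is a minimal, runnable sketch of that pattern on its own, with hypothetical names.

# Illustrative only: the condition-variable gating pattern behind
# _ServicerMethods.pause(); class and method names here are hypothetical.
import contextlib
import threading

class _Gate(object):

    def __init__(self):
        self._condition = threading.Condition()
        self._paused = False

    @contextlib.contextmanager
    def pause(self):
        with self._condition:
            self._paused = True
        yield
        with self._condition:
            self._paused = False
            self._condition.notify_all()

    def wait_if_paused(self):
        with self._condition:
            while self._paused:
                self._condition.wait()

gate = _Gate()
with gate.pause():
    worker = threading.Thread(target=gate.wait_if_paused)
    worker.start()  # blocks inside wait_if_paused while the gate is paused
# Leaving the with-block clears the pause and notifies the waiting thread.
worker.join()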
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py b/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py
index f8ae05bb7a..bcc01f3978 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py
@@ -49,256 +49,264 @@ from tests.unit.framework.common import test_constants
_MESSAGES_IMPORT = b'import "messages.proto";'
+
@contextlib.contextmanager
def _system_path(path):
- old_system_path = sys.path[:]
- sys.path = sys.path[0:1] + path + sys.path[1:]
- yield
- sys.path = old_system_path
+ old_system_path = sys.path[:]
+ sys.path = sys.path[0:1] + path + sys.path[1:]
+ yield
+ sys.path = old_system_path
class DummySplitServicer(object):
- def __init__(self, request_class, response_class):
- self.request_class = request_class
- self.response_class = response_class
+ def __init__(self, request_class, response_class):
+ self.request_class = request_class
+ self.response_class = response_class
- def Call(self, request, context):
- return self.response_class()
+ def Call(self, request, context):
+ return self.response_class()
class SeparateTestMixin(object):
- def testImportAttributes(self):
- with _system_path([self.python_out_directory]):
- pb2 = importlib.import_module(self.pb2_import)
- pb2.Request
- pb2.Response
- if self.should_find_services_in_pb2:
- pb2.TestServiceServicer
- else:
- with self.assertRaises(AttributeError):
- pb2.TestServiceServicer
-
- with _system_path([self.grpc_python_out_directory]):
- pb2_grpc = importlib.import_module(self.pb2_grpc_import)
- pb2_grpc.TestServiceServicer
- with self.assertRaises(AttributeError):
- pb2_grpc.Request
- with self.assertRaises(AttributeError):
- pb2_grpc.Response
-
- def testCall(self):
- with _system_path([self.python_out_directory]):
- pb2 = importlib.import_module(self.pb2_import)
- with _system_path([self.grpc_python_out_directory]):
- pb2_grpc = importlib.import_module(self.pb2_grpc_import)
- server = grpc.server(
- futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
- pb2_grpc.add_TestServiceServicer_to_server(
- DummySplitServicer(
- pb2.Request, pb2.Response), server)
- port = server.add_insecure_port('[::]:0')
- server.start()
- channel = grpc.insecure_channel('localhost:{}'.format(port))
- stub = pb2_grpc.TestServiceStub(channel)
- request = pb2.Request()
- expected_response = pb2.Response()
- response = stub.Call(request)
- self.assertEqual(expected_response, response)
+ def testImportAttributes(self):
+ with _system_path([self.python_out_directory]):
+ pb2 = importlib.import_module(self.pb2_import)
+ pb2.Request
+ pb2.Response
+ if self.should_find_services_in_pb2:
+ pb2.TestServiceServicer
+ else:
+ with self.assertRaises(AttributeError):
+ pb2.TestServiceServicer
+
+ with _system_path([self.grpc_python_out_directory]):
+ pb2_grpc = importlib.import_module(self.pb2_grpc_import)
+ pb2_grpc.TestServiceServicer
+ with self.assertRaises(AttributeError):
+ pb2_grpc.Request
+ with self.assertRaises(AttributeError):
+ pb2_grpc.Response
+
+ def testCall(self):
+ with _system_path([self.python_out_directory]):
+ pb2 = importlib.import_module(self.pb2_import)
+ with _system_path([self.grpc_python_out_directory]):
+ pb2_grpc = importlib.import_module(self.pb2_grpc_import)
+ server = grpc.server(
+ futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
+ pb2_grpc.add_TestServiceServicer_to_server(
+ DummySplitServicer(pb2.Request, pb2.Response), server)
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
+ stub = pb2_grpc.TestServiceStub(channel)
+ request = pb2.Request()
+ expected_response = pb2.Response()
+ response = stub.Call(request)
+ self.assertEqual(expected_response, response)
class CommonTestMixin(object):
- def testImportAttributes(self):
- with _system_path([self.python_out_directory]):
- pb2 = importlib.import_module(self.pb2_import)
- pb2.Request
- pb2.Response
- if self.should_find_services_in_pb2:
- pb2.TestServiceServicer
- else:
- with self.assertRaises(AttributeError):
- pb2.TestServiceServicer
-
- with _system_path([self.grpc_python_out_directory]):
- pb2_grpc = importlib.import_module(self.pb2_grpc_import)
- pb2_grpc.TestServiceServicer
- with self.assertRaises(AttributeError):
- pb2_grpc.Request
- with self.assertRaises(AttributeError):
- pb2_grpc.Response
-
- def testCall(self):
- with _system_path([self.python_out_directory]):
- pb2 = importlib.import_module(self.pb2_import)
- with _system_path([self.grpc_python_out_directory]):
- pb2_grpc = importlib.import_module(self.pb2_grpc_import)
- server = grpc.server(
- futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
- pb2_grpc.add_TestServiceServicer_to_server(
- DummySplitServicer(
- pb2.Request, pb2.Response), server)
- port = server.add_insecure_port('[::]:0')
- server.start()
- channel = grpc.insecure_channel('localhost:{}'.format(port))
- stub = pb2_grpc.TestServiceStub(channel)
- request = pb2.Request()
- expected_response = pb2.Response()
- response = stub.Call(request)
- self.assertEqual(expected_response, response)
+ def testImportAttributes(self):
+ with _system_path([self.python_out_directory]):
+ pb2 = importlib.import_module(self.pb2_import)
+ pb2.Request
+ pb2.Response
+ if self.should_find_services_in_pb2:
+ pb2.TestServiceServicer
+ else:
+ with self.assertRaises(AttributeError):
+ pb2.TestServiceServicer
+
+ with _system_path([self.grpc_python_out_directory]):
+ pb2_grpc = importlib.import_module(self.pb2_grpc_import)
+ pb2_grpc.TestServiceServicer
+ with self.assertRaises(AttributeError):
+ pb2_grpc.Request
+ with self.assertRaises(AttributeError):
+ pb2_grpc.Response
+
+ def testCall(self):
+ with _system_path([self.python_out_directory]):
+ pb2 = importlib.import_module(self.pb2_import)
+ with _system_path([self.grpc_python_out_directory]):
+ pb2_grpc = importlib.import_module(self.pb2_grpc_import)
+ server = grpc.server(
+ futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
+ pb2_grpc.add_TestServiceServicer_to_server(
+ DummySplitServicer(pb2.Request, pb2.Response), server)
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
+ stub = pb2_grpc.TestServiceStub(channel)
+ request = pb2.Request()
+ expected_response = pb2.Response()
+ response = stub.Call(request)
+ self.assertEqual(expected_response, response)
class SameSeparateTest(unittest.TestCase, SeparateTestMixin):
- def setUp(self):
- same_proto_contents = pkgutil.get_data(
- 'tests.protoc_plugin.protos.invocation_testing', 'same.proto')
- self.directory = tempfile.mkdtemp(suffix='same_separate', dir='.')
- self.proto_directory = os.path.join(self.directory, 'proto_path')
- self.python_out_directory = os.path.join(self.directory, 'python_out')
- self.grpc_python_out_directory = os.path.join(self.directory, 'grpc_python_out')
- os.makedirs(self.proto_directory)
- os.makedirs(self.python_out_directory)
- os.makedirs(self.grpc_python_out_directory)
- same_proto_file = os.path.join(self.proto_directory, 'same_separate.proto')
- open(same_proto_file, 'wb').write(same_proto_contents)
- protoc_result = protoc.main([
- '',
- '--proto_path={}'.format(self.proto_directory),
- '--python_out={}'.format(self.python_out_directory),
- '--grpc_python_out=grpc_2_0:{}'.format(self.grpc_python_out_directory),
- same_proto_file,
- ])
- if protoc_result != 0:
- raise Exception("unexpected protoc error")
- open(os.path.join(self.grpc_python_out_directory, '__init__.py'), 'w').write('')
- open(os.path.join(self.python_out_directory, '__init__.py'), 'w').write('')
- self.pb2_import = 'same_separate_pb2'
- self.pb2_grpc_import = 'same_separate_pb2_grpc'
- self.should_find_services_in_pb2 = False
-
- def tearDown(self):
- shutil.rmtree(self.directory)
+ def setUp(self):
+ same_proto_contents = pkgutil.get_data(
+ 'tests.protoc_plugin.protos.invocation_testing', 'same.proto')
+ self.directory = tempfile.mkdtemp(suffix='same_separate', dir='.')
+ self.proto_directory = os.path.join(self.directory, 'proto_path')
+ self.python_out_directory = os.path.join(self.directory, 'python_out')
+ self.grpc_python_out_directory = os.path.join(self.directory,
+ 'grpc_python_out')
+ os.makedirs(self.proto_directory)
+ os.makedirs(self.python_out_directory)
+ os.makedirs(self.grpc_python_out_directory)
+ same_proto_file = os.path.join(self.proto_directory,
+ 'same_separate.proto')
+ open(same_proto_file, 'wb').write(same_proto_contents)
+ protoc_result = protoc.main([
+ '',
+ '--proto_path={}'.format(self.proto_directory),
+ '--python_out={}'.format(self.python_out_directory),
+ '--grpc_python_out=grpc_2_0:{}'.format(
+ self.grpc_python_out_directory),
+ same_proto_file,
+ ])
+ if protoc_result != 0:
+ raise Exception("unexpected protoc error")
+ open(os.path.join(self.grpc_python_out_directory, '__init__.py'),
+ 'w').write('')
+ open(os.path.join(self.python_out_directory, '__init__.py'),
+ 'w').write('')
+ self.pb2_import = 'same_separate_pb2'
+ self.pb2_grpc_import = 'same_separate_pb2_grpc'
+ self.should_find_services_in_pb2 = False
+
+ def tearDown(self):
+ shutil.rmtree(self.directory)
class SameCommonTest(unittest.TestCase, CommonTestMixin):
- def setUp(self):
- same_proto_contents = pkgutil.get_data(
- 'tests.protoc_plugin.protos.invocation_testing', 'same.proto')
- self.directory = tempfile.mkdtemp(suffix='same_common', dir='.')
- self.proto_directory = os.path.join(self.directory, 'proto_path')
- self.python_out_directory = os.path.join(self.directory, 'python_out')
- self.grpc_python_out_directory = self.python_out_directory
- os.makedirs(self.proto_directory)
- os.makedirs(self.python_out_directory)
- same_proto_file = os.path.join(self.proto_directory, 'same_common.proto')
- open(same_proto_file, 'wb').write(same_proto_contents)
- protoc_result = protoc.main([
- '',
- '--proto_path={}'.format(self.proto_directory),
- '--python_out={}'.format(self.python_out_directory),
- '--grpc_python_out={}'.format(self.grpc_python_out_directory),
- same_proto_file,
- ])
- if protoc_result != 0:
- raise Exception("unexpected protoc error")
- open(os.path.join(self.python_out_directory, '__init__.py'), 'w').write('')
- self.pb2_import = 'same_common_pb2'
- self.pb2_grpc_import = 'same_common_pb2_grpc'
- self.should_find_services_in_pb2 = True
-
- def tearDown(self):
- shutil.rmtree(self.directory)
+ def setUp(self):
+ same_proto_contents = pkgutil.get_data(
+ 'tests.protoc_plugin.protos.invocation_testing', 'same.proto')
+ self.directory = tempfile.mkdtemp(suffix='same_common', dir='.')
+ self.proto_directory = os.path.join(self.directory, 'proto_path')
+ self.python_out_directory = os.path.join(self.directory, 'python_out')
+ self.grpc_python_out_directory = self.python_out_directory
+ os.makedirs(self.proto_directory)
+ os.makedirs(self.python_out_directory)
+ same_proto_file = os.path.join(self.proto_directory,
+ 'same_common.proto')
+ open(same_proto_file, 'wb').write(same_proto_contents)
+ protoc_result = protoc.main([
+ '',
+ '--proto_path={}'.format(self.proto_directory),
+ '--python_out={}'.format(self.python_out_directory),
+ '--grpc_python_out={}'.format(self.grpc_python_out_directory),
+ same_proto_file,
+ ])
+ if protoc_result != 0:
+ raise Exception("unexpected protoc error")
+ open(os.path.join(self.python_out_directory, '__init__.py'),
+ 'w').write('')
+ self.pb2_import = 'same_common_pb2'
+ self.pb2_grpc_import = 'same_common_pb2_grpc'
+ self.should_find_services_in_pb2 = True
+
+ def tearDown(self):
+ shutil.rmtree(self.directory)
class SplitCommonTest(unittest.TestCase, CommonTestMixin):
- def setUp(self):
- services_proto_contents = pkgutil.get_data(
- 'tests.protoc_plugin.protos.invocation_testing.split_services',
- 'services.proto')
- messages_proto_contents = pkgutil.get_data(
- 'tests.protoc_plugin.protos.invocation_testing.split_messages',
- 'messages.proto')
- self.directory = tempfile.mkdtemp(suffix='split_common', dir='.')
- self.proto_directory = os.path.join(self.directory, 'proto_path')
- self.python_out_directory = os.path.join(self.directory, 'python_out')
- self.grpc_python_out_directory = self.python_out_directory
- os.makedirs(self.proto_directory)
- os.makedirs(self.python_out_directory)
- services_proto_file = os.path.join(self.proto_directory,
- 'split_common_services.proto')
- messages_proto_file = os.path.join(self.proto_directory,
- 'split_common_messages.proto')
- open(services_proto_file, 'wb').write(services_proto_contents.replace(
- _MESSAGES_IMPORT,
- b'import "split_common_messages.proto";'
- ))
- open(messages_proto_file, 'wb').write(messages_proto_contents)
- protoc_result = protoc.main([
- '',
- '--proto_path={}'.format(self.proto_directory),
- '--python_out={}'.format(self.python_out_directory),
- '--grpc_python_out={}'.format(self.grpc_python_out_directory),
- services_proto_file,
- messages_proto_file,
- ])
- if protoc_result != 0:
- raise Exception("unexpected protoc error")
- open(os.path.join(self.python_out_directory, '__init__.py'), 'w').write('')
- self.pb2_import = 'split_common_messages_pb2'
- self.pb2_grpc_import = 'split_common_services_pb2_grpc'
- self.should_find_services_in_pb2 = False
-
- def tearDown(self):
- shutil.rmtree(self.directory)
+ def setUp(self):
+ services_proto_contents = pkgutil.get_data(
+ 'tests.protoc_plugin.protos.invocation_testing.split_services',
+ 'services.proto')
+ messages_proto_contents = pkgutil.get_data(
+ 'tests.protoc_plugin.protos.invocation_testing.split_messages',
+ 'messages.proto')
+ self.directory = tempfile.mkdtemp(suffix='split_common', dir='.')
+ self.proto_directory = os.path.join(self.directory, 'proto_path')
+ self.python_out_directory = os.path.join(self.directory, 'python_out')
+ self.grpc_python_out_directory = self.python_out_directory
+ os.makedirs(self.proto_directory)
+ os.makedirs(self.python_out_directory)
+ services_proto_file = os.path.join(self.proto_directory,
+ 'split_common_services.proto')
+ messages_proto_file = os.path.join(self.proto_directory,
+ 'split_common_messages.proto')
+ open(services_proto_file, 'wb').write(
+ services_proto_contents.replace(
+ _MESSAGES_IMPORT, b'import "split_common_messages.proto";'))
+ open(messages_proto_file, 'wb').write(messages_proto_contents)
+ protoc_result = protoc.main([
+ '',
+ '--proto_path={}'.format(self.proto_directory),
+ '--python_out={}'.format(self.python_out_directory),
+ '--grpc_python_out={}'.format(self.grpc_python_out_directory),
+ services_proto_file,
+ messages_proto_file,
+ ])
+ if protoc_result != 0:
+ raise Exception("unexpected protoc error")
+ open(os.path.join(self.python_out_directory, '__init__.py'),
+ 'w').write('')
+ self.pb2_import = 'split_common_messages_pb2'
+ self.pb2_grpc_import = 'split_common_services_pb2_grpc'
+ self.should_find_services_in_pb2 = False
+
+ def tearDown(self):
+ shutil.rmtree(self.directory)
class SplitSeparateTest(unittest.TestCase, SeparateTestMixin):
- def setUp(self):
- services_proto_contents = pkgutil.get_data(
- 'tests.protoc_plugin.protos.invocation_testing.split_services',
- 'services.proto')
- messages_proto_contents = pkgutil.get_data(
- 'tests.protoc_plugin.protos.invocation_testing.split_messages',
- 'messages.proto')
- self.directory = tempfile.mkdtemp(suffix='split_separate', dir='.')
- self.proto_directory = os.path.join(self.directory, 'proto_path')
- self.python_out_directory = os.path.join(self.directory, 'python_out')
- self.grpc_python_out_directory = os.path.join(self.directory, 'grpc_python_out')
- os.makedirs(self.proto_directory)
- os.makedirs(self.python_out_directory)
- os.makedirs(self.grpc_python_out_directory)
- services_proto_file = os.path.join(self.proto_directory,
- 'split_separate_services.proto')
- messages_proto_file = os.path.join(self.proto_directory,
- 'split_separate_messages.proto')
- open(services_proto_file, 'wb').write(services_proto_contents.replace(
- _MESSAGES_IMPORT,
- b'import "split_separate_messages.proto";'
- ))
- open(messages_proto_file, 'wb').write(messages_proto_contents)
- protoc_result = protoc.main([
- '',
- '--proto_path={}'.format(self.proto_directory),
- '--python_out={}'.format(self.python_out_directory),
- '--grpc_python_out=grpc_2_0:{}'.format(self.grpc_python_out_directory),
- services_proto_file,
- messages_proto_file,
- ])
- if protoc_result != 0:
- raise Exception("unexpected protoc error")
- open(os.path.join(self.python_out_directory, '__init__.py'), 'w').write('')
- self.pb2_import = 'split_separate_messages_pb2'
- self.pb2_grpc_import = 'split_separate_services_pb2_grpc'
- self.should_find_services_in_pb2 = False
-
- def tearDown(self):
- shutil.rmtree(self.directory)
+ def setUp(self):
+ services_proto_contents = pkgutil.get_data(
+ 'tests.protoc_plugin.protos.invocation_testing.split_services',
+ 'services.proto')
+ messages_proto_contents = pkgutil.get_data(
+ 'tests.protoc_plugin.protos.invocation_testing.split_messages',
+ 'messages.proto')
+ self.directory = tempfile.mkdtemp(suffix='split_separate', dir='.')
+ self.proto_directory = os.path.join(self.directory, 'proto_path')
+ self.python_out_directory = os.path.join(self.directory, 'python_out')
+ self.grpc_python_out_directory = os.path.join(self.directory,
+ 'grpc_python_out')
+ os.makedirs(self.proto_directory)
+ os.makedirs(self.python_out_directory)
+ os.makedirs(self.grpc_python_out_directory)
+ services_proto_file = os.path.join(self.proto_directory,
+ 'split_separate_services.proto')
+ messages_proto_file = os.path.join(self.proto_directory,
+ 'split_separate_messages.proto')
+ open(services_proto_file, 'wb').write(
+ services_proto_contents.replace(
+ _MESSAGES_IMPORT, b'import "split_separate_messages.proto";'))
+ open(messages_proto_file, 'wb').write(messages_proto_contents)
+ protoc_result = protoc.main([
+ '',
+ '--proto_path={}'.format(self.proto_directory),
+ '--python_out={}'.format(self.python_out_directory),
+ '--grpc_python_out=grpc_2_0:{}'.format(
+ self.grpc_python_out_directory),
+ services_proto_file,
+ messages_proto_file,
+ ])
+ if protoc_result != 0:
+ raise Exception("unexpected protoc error")
+ open(os.path.join(self.python_out_directory, '__init__.py'),
+ 'w').write('')
+ self.pb2_import = 'split_separate_messages_pb2'
+ self.pb2_grpc_import = 'split_separate_services_pb2_grpc'
+ self.should_find_services_in_pb2 = False
+
+ def tearDown(self):
+ shutil.rmtree(self.directory)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
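
The setUp methods above drive code generation through grpc_tools.protoc rather than shelling out. A minimal standalone sketch of the same invocation pattern, assuming grpcio-tools is installed; the paths ./protos, ./generated and the file example.proto are placeholders invented for illustration, not part of the test suite:

    import os
    import sys

    from grpc_tools import protoc

    def generate(proto_dir, out_dir):
        # protoc.main takes an argv-style list; the leading '' stands in for
        # the program name, exactly as in the tests above.
        result = protoc.main([
            '',
            '--proto_path={}'.format(proto_dir),
            '--python_out={}'.format(out_dir),       # messages -> *_pb2.py
            '--grpc_python_out={}'.format(out_dir),  # services -> *_pb2_grpc.py
            os.path.join(proto_dir, 'example.proto'),
        ])
        if result != 0:
            sys.exit('protoc failed with status {}'.format(result))

    if __name__ == '__main__':
        generate('./protos', './generated')
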
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
index 1eba9c9354..f64f4e962b 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
@@ -64,84 +64,84 @@ STUB_FACTORY_IDENTIFIER = 'beta_create_TestService_stub'
class _ServicerMethods(object):
- def __init__(self):
- self._condition = threading.Condition()
- self._paused = False
- self._fail = False
-
- @contextlib.contextmanager
- def pause(self): # pylint: disable=invalid-name
- with self._condition:
- self._paused = True
- yield
- with self._condition:
- self._paused = False
- self._condition.notify_all()
-
- @contextlib.contextmanager
- def fail(self): # pylint: disable=invalid-name
- with self._condition:
- self._fail = True
- yield
- with self._condition:
- self._fail = False
-
- def _control(self): # pylint: disable=invalid-name
- with self._condition:
- if self._fail:
- raise ValueError()
- while self._paused:
- self._condition.wait()
-
- def UnaryCall(self, request, unused_rpc_context):
- response = response_pb2.SimpleResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
- response.payload.payload_compressable = 'a' * request.response_size
- self._control()
- return response
-
- def StreamingOutputCall(self, request, unused_rpc_context):
- for parameter in request.response_parameters:
- response = response_pb2.StreamingOutputCallResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
- response.payload.payload_compressable = 'a' * parameter.size
- self._control()
- yield response
-
- def StreamingInputCall(self, request_iter, unused_rpc_context):
- response = response_pb2.StreamingInputCallResponse()
- aggregated_payload_size = 0
- for request in request_iter:
- aggregated_payload_size += len(request.payload.payload_compressable)
- response.aggregated_payload_size = aggregated_payload_size
- self._control()
- return response
-
- def FullDuplexCall(self, request_iter, unused_rpc_context):
- for request in request_iter:
- for parameter in request.response_parameters:
- response = response_pb2.StreamingOutputCallResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
- response.payload.payload_compressable = 'a' * parameter.size
- self._control()
- yield response
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._paused = False
+ self._fail = False
+
+ @contextlib.contextmanager
+ def pause(self): # pylint: disable=invalid-name
+ with self._condition:
+ self._paused = True
+ yield
+ with self._condition:
+ self._paused = False
+ self._condition.notify_all()
- def HalfDuplexCall(self, request_iter, unused_rpc_context):
- responses = []
- for request in request_iter:
- for parameter in request.response_parameters:
- response = response_pb2.StreamingOutputCallResponse()
+ @contextlib.contextmanager
+ def fail(self): # pylint: disable=invalid-name
+ with self._condition:
+ self._fail = True
+ yield
+ with self._condition:
+ self._fail = False
+
+ def _control(self): # pylint: disable=invalid-name
+ with self._condition:
+ if self._fail:
+ raise ValueError()
+ while self._paused:
+ self._condition.wait()
+
+ def UnaryCall(self, request, unused_rpc_context):
+ response = response_pb2.SimpleResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
- response.payload.payload_compressable = 'a' * parameter.size
+ response.payload.payload_compressable = 'a' * request.response_size
+ self._control()
+ return response
+
+ def StreamingOutputCall(self, request, unused_rpc_context):
+ for parameter in request.response_parameters:
+ response = response_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response.payload.payload_compressable = 'a' * parameter.size
+ self._control()
+ yield response
+
+ def StreamingInputCall(self, request_iter, unused_rpc_context):
+ response = response_pb2.StreamingInputCallResponse()
+ aggregated_payload_size = 0
+ for request in request_iter:
+ aggregated_payload_size += len(request.payload.payload_compressable)
+ response.aggregated_payload_size = aggregated_payload_size
self._control()
- responses.append(response)
- for response in responses:
- yield response
+ return response
+
+ def FullDuplexCall(self, request_iter, unused_rpc_context):
+ for request in request_iter:
+ for parameter in request.response_parameters:
+ response = response_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response.payload.payload_compressable = 'a' * parameter.size
+ self._control()
+ yield response
+
+ def HalfDuplexCall(self, request_iter, unused_rpc_context):
+ responses = []
+ for request in request_iter:
+ for parameter in request.response_parameters:
+ response = response_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response.payload.payload_compressable = 'a' * parameter.size
+ self._control()
+ responses.append(response)
+ for response in responses:
+ yield response
@contextlib.contextmanager
def _CreateService():
- """Provides a servicer backend and a stub.
+ """Provides a servicer backend and a stub.
The servicer is just the implementation of the actual servicer passed to the
face player of the python RPC implementation; the two are detached.
@@ -151,38 +151,38 @@ def _CreateService():
    the service bound to the stub and stub is the stub on which to invoke
RPCs.
"""
- servicer_methods = _ServicerMethods()
+ servicer_methods = _ServicerMethods()
- class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
+ class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
- def UnaryCall(self, request, context):
- return servicer_methods.UnaryCall(request, context)
+ def UnaryCall(self, request, context):
+ return servicer_methods.UnaryCall(request, context)
- def StreamingOutputCall(self, request, context):
- return servicer_methods.StreamingOutputCall(request, context)
+ def StreamingOutputCall(self, request, context):
+ return servicer_methods.StreamingOutputCall(request, context)
- def StreamingInputCall(self, request_iter, context):
- return servicer_methods.StreamingInputCall(request_iter, context)
+ def StreamingInputCall(self, request_iter, context):
+ return servicer_methods.StreamingInputCall(request_iter, context)
- def FullDuplexCall(self, request_iter, context):
- return servicer_methods.FullDuplexCall(request_iter, context)
+ def FullDuplexCall(self, request_iter, context):
+ return servicer_methods.FullDuplexCall(request_iter, context)
- def HalfDuplexCall(self, request_iter, context):
- return servicer_methods.HalfDuplexCall(request_iter, context)
+ def HalfDuplexCall(self, request_iter, context):
+ return servicer_methods.HalfDuplexCall(request_iter, context)
- servicer = Servicer()
- server = getattr(service_pb2, SERVER_FACTORY_IDENTIFIER)(servicer)
- port = server.add_insecure_port('[::]:0')
- server.start()
- channel = implementations.insecure_channel('localhost', port)
- stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
- yield (servicer_methods, stub)
- server.stop(0)
+ servicer = Servicer()
+ server = getattr(service_pb2, SERVER_FACTORY_IDENTIFIER)(servicer)
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ channel = implementations.insecure_channel('localhost', port)
+ stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
+ yield (servicer_methods, stub)
+ server.stop(0)
@contextlib.contextmanager
def _CreateIncompleteService():
- """Provides a servicer backend that fails to implement methods and its stub.
+ """Provides a servicer backend that fails to implement methods and its stub.
The servicer is just the implementation of the actual servicer passed to the
face player of the python RPC implementation; the two are detached.
@@ -194,297 +194,297 @@ def _CreateIncompleteService():
RPCs.
"""
- class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
- pass
+ class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
+ pass
- servicer = Servicer()
- server = getattr(service_pb2, SERVER_FACTORY_IDENTIFIER)(servicer)
- port = server.add_insecure_port('[::]:0')
- server.start()
- channel = implementations.insecure_channel('localhost', port)
- stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
- yield None, stub
- server.stop(0)
+ servicer = Servicer()
+ server = getattr(service_pb2, SERVER_FACTORY_IDENTIFIER)(servicer)
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ channel = implementations.insecure_channel('localhost', port)
+ stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
+ yield None, stub
+ server.stop(0)
def _streaming_input_request_iterator():
- for _ in range(3):
- request = request_pb2.StreamingInputCallRequest()
- request.payload.payload_type = payload_pb2.COMPRESSABLE
- request.payload.payload_compressable = 'a'
- yield request
+ for _ in range(3):
+ request = request_pb2.StreamingInputCallRequest()
+ request.payload.payload_type = payload_pb2.COMPRESSABLE
+ request.payload.payload_compressable = 'a'
+ yield request
def _streaming_output_request():
- request = request_pb2.StreamingOutputCallRequest()
- sizes = [1, 2, 3]
- request.response_parameters.add(size=sizes[0], interval_us=0)
- request.response_parameters.add(size=sizes[1], interval_us=0)
- request.response_parameters.add(size=sizes[2], interval_us=0)
- return request
+ request = request_pb2.StreamingOutputCallRequest()
+ sizes = [1, 2, 3]
+ request.response_parameters.add(size=sizes[0], interval_us=0)
+ request.response_parameters.add(size=sizes[1], interval_us=0)
+ request.response_parameters.add(size=sizes[2], interval_us=0)
+ return request
def _full_duplex_request_iterator():
- request = request_pb2.StreamingOutputCallRequest()
- request.response_parameters.add(size=1, interval_us=0)
- yield request
- request = request_pb2.StreamingOutputCallRequest()
- request.response_parameters.add(size=2, interval_us=0)
- request.response_parameters.add(size=3, interval_us=0)
- yield request
+ request = request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=1, interval_us=0)
+ yield request
+ request = request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=2, interval_us=0)
+ request.response_parameters.add(size=3, interval_us=0)
+ yield request
class PythonPluginTest(unittest.TestCase):
- """Test case for the gRPC Python protoc-plugin.
+ """Test case for the gRPC Python protoc-plugin.
While reading these tests, remember that the futures API
(`stub.method.future()`) only gives futures for the *response-unary*
methods and does not exist for response-streaming methods.
"""
- def testImportAttributes(self):
- # check that we can access the generated module and its members.
- self.assertIsNotNone(
- getattr(service_pb2, SERVICER_IDENTIFIER, None))
- self.assertIsNotNone(
- getattr(service_pb2, STUB_IDENTIFIER, None))
- self.assertIsNotNone(
- getattr(service_pb2, SERVER_FACTORY_IDENTIFIER, None))
- self.assertIsNotNone(
- getattr(service_pb2, STUB_FACTORY_IDENTIFIER, None))
-
- def testUpDown(self):
- with _CreateService():
- request_pb2.SimpleRequest(response_size=13)
-
- def testIncompleteServicer(self):
- with _CreateIncompleteService() as (_, stub):
- request = request_pb2.SimpleRequest(response_size=13)
- try:
- stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
- except face.AbortionError as error:
- self.assertEqual(interfaces.StatusCode.UNIMPLEMENTED, error.code)
-
- def testUnaryCall(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
- response = stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
- expected_response = methods.UnaryCall(request, 'not a real context!')
- self.assertEqual(expected_response, response)
-
- def testUnaryCallFuture(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
- # Check that the call does not block waiting for the server to respond.
- with methods.pause():
- response_future = stub.UnaryCall.future(
- request, test_constants.LONG_TIMEOUT)
- response = response_future.result()
- expected_response = methods.UnaryCall(request, 'not a real RpcContext!')
- self.assertEqual(expected_response, response)
-
- def testUnaryCallFutureExpired(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
- with methods.pause():
- response_future = stub.UnaryCall.future(
- request, test_constants.SHORT_TIMEOUT)
- with self.assertRaises(face.ExpirationError):
- response_future.result()
-
- def testUnaryCallFutureCancelled(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
- with methods.pause():
- response_future = stub.UnaryCall.future(request, 1)
- response_future.cancel()
- self.assertTrue(response_future.cancelled())
-
- def testUnaryCallFutureFailed(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
- with methods.fail():
- response_future = stub.UnaryCall.future(
- request, test_constants.LONG_TIMEOUT)
- self.assertIsNotNone(response_future.exception())
-
- def testStreamingOutputCall(self):
- with _CreateService() as (methods, stub):
- request = _streaming_output_request()
- responses = stub.StreamingOutputCall(
- request, test_constants.LONG_TIMEOUT)
- expected_responses = methods.StreamingOutputCall(
- request, 'not a real RpcContext!')
- for expected_response, response in moves.zip_longest(
- expected_responses, responses):
+ def testImportAttributes(self):
+ # check that we can access the generated module and its members.
+ self.assertIsNotNone(getattr(service_pb2, SERVICER_IDENTIFIER, None))
+ self.assertIsNotNone(getattr(service_pb2, STUB_IDENTIFIER, None))
+ self.assertIsNotNone(
+ getattr(service_pb2, SERVER_FACTORY_IDENTIFIER, None))
+ self.assertIsNotNone(
+ getattr(service_pb2, STUB_FACTORY_IDENTIFIER, None))
+
+ def testUpDown(self):
+ with _CreateService():
+ request_pb2.SimpleRequest(response_size=13)
+
+ def testIncompleteServicer(self):
+ with _CreateIncompleteService() as (_, stub):
+ request = request_pb2.SimpleRequest(response_size=13)
+ try:
+ stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
+ except face.AbortionError as error:
+ self.assertEqual(interfaces.StatusCode.UNIMPLEMENTED,
+ error.code)
+
+ def testUnaryCall(self):
+ with _CreateService() as (methods, stub):
+ request = request_pb2.SimpleRequest(response_size=13)
+ response = stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
+ expected_response = methods.UnaryCall(request, 'not a real context!')
self.assertEqual(expected_response, response)
- def testStreamingOutputCallExpired(self):
- with _CreateService() as (methods, stub):
- request = _streaming_output_request()
- with methods.pause():
- responses = stub.StreamingOutputCall(
- request, test_constants.SHORT_TIMEOUT)
- with self.assertRaises(face.ExpirationError):
- list(responses)
-
- def testStreamingOutputCallCancelled(self):
- with _CreateService() as (methods, stub):
- request = _streaming_output_request()
- responses = stub.StreamingOutputCall(
- request, test_constants.LONG_TIMEOUT)
- next(responses)
- responses.cancel()
- with self.assertRaises(face.CancellationError):
- next(responses)
-
- def testStreamingOutputCallFailed(self):
- with _CreateService() as (methods, stub):
- request = _streaming_output_request()
- with methods.fail():
- responses = stub.StreamingOutputCall(request, 1)
- self.assertIsNotNone(responses)
- with self.assertRaises(face.RemoteError):
- next(responses)
-
- def testStreamingInputCall(self):
- with _CreateService() as (methods, stub):
- response = stub.StreamingInputCall(
- _streaming_input_request_iterator(),
- test_constants.LONG_TIMEOUT)
- expected_response = methods.StreamingInputCall(
- _streaming_input_request_iterator(),
- 'not a real RpcContext!')
- self.assertEqual(expected_response, response)
-
- def testStreamingInputCallFuture(self):
- with _CreateService() as (methods, stub):
- with methods.pause():
- response_future = stub.StreamingInputCall.future(
- _streaming_input_request_iterator(),
- test_constants.LONG_TIMEOUT)
- response = response_future.result()
- expected_response = methods.StreamingInputCall(
- _streaming_input_request_iterator(),
- 'not a real RpcContext!')
- self.assertEqual(expected_response, response)
-
- def testStreamingInputCallFutureExpired(self):
- with _CreateService() as (methods, stub):
- with methods.pause():
- response_future = stub.StreamingInputCall.future(
- _streaming_input_request_iterator(),
- test_constants.SHORT_TIMEOUT)
- with self.assertRaises(face.ExpirationError):
- response_future.result()
- self.assertIsInstance(
- response_future.exception(), face.ExpirationError)
-
- def testStreamingInputCallFutureCancelled(self):
- with _CreateService() as (methods, stub):
- with methods.pause():
- response_future = stub.StreamingInputCall.future(
- _streaming_input_request_iterator(),
- test_constants.LONG_TIMEOUT)
- response_future.cancel()
- self.assertTrue(response_future.cancelled())
- with self.assertRaises(future.CancelledError):
- response_future.result()
-
- def testStreamingInputCallFutureFailed(self):
- with _CreateService() as (methods, stub):
- with methods.fail():
- response_future = stub.StreamingInputCall.future(
- _streaming_input_request_iterator(),
- test_constants.LONG_TIMEOUT)
- self.assertIsNotNone(response_future.exception())
-
- def testFullDuplexCall(self):
- with _CreateService() as (methods, stub):
- responses = stub.FullDuplexCall(
- _full_duplex_request_iterator(),
- test_constants.LONG_TIMEOUT)
- expected_responses = methods.FullDuplexCall(
- _full_duplex_request_iterator(),
- 'not a real RpcContext!')
- for expected_response, response in moves.zip_longest(
- expected_responses, responses):
+ def testUnaryCallFuture(self):
+ with _CreateService() as (methods, stub):
+ request = request_pb2.SimpleRequest(response_size=13)
+ # Check that the call does not block waiting for the server to respond.
+ with methods.pause():
+ response_future = stub.UnaryCall.future(
+ request, test_constants.LONG_TIMEOUT)
+ response = response_future.result()
+ expected_response = methods.UnaryCall(request, 'not a real RpcContext!')
self.assertEqual(expected_response, response)
- def testFullDuplexCallExpired(self):
- request_iterator = _full_duplex_request_iterator()
- with _CreateService() as (methods, stub):
- with methods.pause():
- responses = stub.FullDuplexCall(
- request_iterator, test_constants.SHORT_TIMEOUT)
- with self.assertRaises(face.ExpirationError):
- list(responses)
-
- def testFullDuplexCallCancelled(self):
- with _CreateService() as (methods, stub):
- request_iterator = _full_duplex_request_iterator()
- responses = stub.FullDuplexCall(
- request_iterator, test_constants.LONG_TIMEOUT)
- next(responses)
- responses.cancel()
- with self.assertRaises(face.CancellationError):
- next(responses)
-
- def testFullDuplexCallFailed(self):
- request_iterator = _full_duplex_request_iterator()
- with _CreateService() as (methods, stub):
- with methods.fail():
- responses = stub.FullDuplexCall(
- request_iterator, test_constants.LONG_TIMEOUT)
- self.assertIsNotNone(responses)
- with self.assertRaises(face.RemoteError):
- next(responses)
-
- def testHalfDuplexCall(self):
- with _CreateService() as (methods, stub):
- def half_duplex_request_iterator():
- request = request_pb2.StreamingOutputCallRequest()
- request.response_parameters.add(size=1, interval_us=0)
- yield request
- request = request_pb2.StreamingOutputCallRequest()
- request.response_parameters.add(size=2, interval_us=0)
- request.response_parameters.add(size=3, interval_us=0)
- yield request
- responses = stub.HalfDuplexCall(
- half_duplex_request_iterator(), test_constants.LONG_TIMEOUT)
- expected_responses = methods.HalfDuplexCall(
- half_duplex_request_iterator(), 'not a real RpcContext!')
- for check in moves.zip_longest(expected_responses, responses):
- expected_response, response = check
+ def testUnaryCallFutureExpired(self):
+ with _CreateService() as (methods, stub):
+ request = request_pb2.SimpleRequest(response_size=13)
+ with methods.pause():
+ response_future = stub.UnaryCall.future(
+ request, test_constants.SHORT_TIMEOUT)
+ with self.assertRaises(face.ExpirationError):
+ response_future.result()
+
+ def testUnaryCallFutureCancelled(self):
+ with _CreateService() as (methods, stub):
+ request = request_pb2.SimpleRequest(response_size=13)
+ with methods.pause():
+ response_future = stub.UnaryCall.future(request, 1)
+ response_future.cancel()
+ self.assertTrue(response_future.cancelled())
+
+ def testUnaryCallFutureFailed(self):
+ with _CreateService() as (methods, stub):
+ request = request_pb2.SimpleRequest(response_size=13)
+ with methods.fail():
+ response_future = stub.UnaryCall.future(
+ request, test_constants.LONG_TIMEOUT)
+ self.assertIsNotNone(response_future.exception())
+
+ def testStreamingOutputCall(self):
+ with _CreateService() as (methods, stub):
+ request = _streaming_output_request()
+ responses = stub.StreamingOutputCall(request,
+ test_constants.LONG_TIMEOUT)
+ expected_responses = methods.StreamingOutputCall(
+ request, 'not a real RpcContext!')
+ for expected_response, response in moves.zip_longest(
+ expected_responses, responses):
+ self.assertEqual(expected_response, response)
+
+ def testStreamingOutputCallExpired(self):
+ with _CreateService() as (methods, stub):
+ request = _streaming_output_request()
+ with methods.pause():
+ responses = stub.StreamingOutputCall(
+ request, test_constants.SHORT_TIMEOUT)
+ with self.assertRaises(face.ExpirationError):
+ list(responses)
+
+ def testStreamingOutputCallCancelled(self):
+ with _CreateService() as (methods, stub):
+ request = _streaming_output_request()
+ responses = stub.StreamingOutputCall(request,
+ test_constants.LONG_TIMEOUT)
+ next(responses)
+ responses.cancel()
+ with self.assertRaises(face.CancellationError):
+ next(responses)
+
+ def testStreamingOutputCallFailed(self):
+ with _CreateService() as (methods, stub):
+ request = _streaming_output_request()
+ with methods.fail():
+ responses = stub.StreamingOutputCall(request, 1)
+ self.assertIsNotNone(responses)
+ with self.assertRaises(face.RemoteError):
+ next(responses)
+
+ def testStreamingInputCall(self):
+ with _CreateService() as (methods, stub):
+ response = stub.StreamingInputCall(
+ _streaming_input_request_iterator(),
+ test_constants.LONG_TIMEOUT)
+ expected_response = methods.StreamingInputCall(
+ _streaming_input_request_iterator(), 'not a real RpcContext!')
self.assertEqual(expected_response, response)
- def testHalfDuplexCallWedged(self):
- condition = threading.Condition()
- wait_cell = [False]
- @contextlib.contextmanager
- def wait(): # pylint: disable=invalid-name
- # Where's Python 3's 'nonlocal' statement when you need it?
- with condition:
- wait_cell[0] = True
- yield
- with condition:
- wait_cell[0] = False
- condition.notify_all()
- def half_duplex_request_iterator():
- request = request_pb2.StreamingOutputCallRequest()
- request.response_parameters.add(size=1, interval_us=0)
- yield request
- with condition:
- while wait_cell[0]:
- condition.wait()
- with _CreateService() as (methods, stub):
- with wait():
- responses = stub.HalfDuplexCall(
- half_duplex_request_iterator(), test_constants.SHORT_TIMEOUT)
- # half-duplex waits for the client to send all info
- with self.assertRaises(face.ExpirationError):
- next(responses)
+ def testStreamingInputCallFuture(self):
+ with _CreateService() as (methods, stub):
+ with methods.pause():
+ response_future = stub.StreamingInputCall.future(
+ _streaming_input_request_iterator(),
+ test_constants.LONG_TIMEOUT)
+ response = response_future.result()
+ expected_response = methods.StreamingInputCall(
+ _streaming_input_request_iterator(), 'not a real RpcContext!')
+ self.assertEqual(expected_response, response)
+
+ def testStreamingInputCallFutureExpired(self):
+ with _CreateService() as (methods, stub):
+ with methods.pause():
+ response_future = stub.StreamingInputCall.future(
+ _streaming_input_request_iterator(),
+ test_constants.SHORT_TIMEOUT)
+ with self.assertRaises(face.ExpirationError):
+ response_future.result()
+ self.assertIsInstance(response_future.exception(),
+ face.ExpirationError)
+
+ def testStreamingInputCallFutureCancelled(self):
+ with _CreateService() as (methods, stub):
+ with methods.pause():
+ response_future = stub.StreamingInputCall.future(
+ _streaming_input_request_iterator(),
+ test_constants.LONG_TIMEOUT)
+ response_future.cancel()
+ self.assertTrue(response_future.cancelled())
+ with self.assertRaises(future.CancelledError):
+ response_future.result()
+
+ def testStreamingInputCallFutureFailed(self):
+ with _CreateService() as (methods, stub):
+ with methods.fail():
+ response_future = stub.StreamingInputCall.future(
+ _streaming_input_request_iterator(),
+ test_constants.LONG_TIMEOUT)
+ self.assertIsNotNone(response_future.exception())
+
+ def testFullDuplexCall(self):
+ with _CreateService() as (methods, stub):
+ responses = stub.FullDuplexCall(_full_duplex_request_iterator(),
+ test_constants.LONG_TIMEOUT)
+ expected_responses = methods.FullDuplexCall(
+ _full_duplex_request_iterator(), 'not a real RpcContext!')
+ for expected_response, response in moves.zip_longest(
+ expected_responses, responses):
+ self.assertEqual(expected_response, response)
+
+ def testFullDuplexCallExpired(self):
+ request_iterator = _full_duplex_request_iterator()
+ with _CreateService() as (methods, stub):
+ with methods.pause():
+ responses = stub.FullDuplexCall(request_iterator,
+ test_constants.SHORT_TIMEOUT)
+ with self.assertRaises(face.ExpirationError):
+ list(responses)
+
+ def testFullDuplexCallCancelled(self):
+ with _CreateService() as (methods, stub):
+ request_iterator = _full_duplex_request_iterator()
+ responses = stub.FullDuplexCall(request_iterator,
+ test_constants.LONG_TIMEOUT)
+ next(responses)
+ responses.cancel()
+ with self.assertRaises(face.CancellationError):
+ next(responses)
+
+ def testFullDuplexCallFailed(self):
+ request_iterator = _full_duplex_request_iterator()
+ with _CreateService() as (methods, stub):
+ with methods.fail():
+ responses = stub.FullDuplexCall(request_iterator,
+ test_constants.LONG_TIMEOUT)
+ self.assertIsNotNone(responses)
+ with self.assertRaises(face.RemoteError):
+ next(responses)
+
+ def testHalfDuplexCall(self):
+ with _CreateService() as (methods, stub):
+
+ def half_duplex_request_iterator():
+ request = request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=1, interval_us=0)
+ yield request
+ request = request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=2, interval_us=0)
+ request.response_parameters.add(size=3, interval_us=0)
+ yield request
+
+ responses = stub.HalfDuplexCall(half_duplex_request_iterator(),
+ test_constants.LONG_TIMEOUT)
+ expected_responses = methods.HalfDuplexCall(
+ half_duplex_request_iterator(), 'not a real RpcContext!')
+ for check in moves.zip_longest(expected_responses, responses):
+ expected_response, response = check
+ self.assertEqual(expected_response, response)
+
+ def testHalfDuplexCallWedged(self):
+ condition = threading.Condition()
+ wait_cell = [False]
+
+ @contextlib.contextmanager
+ def wait(): # pylint: disable=invalid-name
+ # Where's Python 3's 'nonlocal' statement when you need it?
+ with condition:
+ wait_cell[0] = True
+ yield
+ with condition:
+ wait_cell[0] = False
+ condition.notify_all()
+
+ def half_duplex_request_iterator():
+ request = request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=1, interval_us=0)
+ yield request
+ with condition:
+ while wait_cell[0]:
+ condition.wait()
+
+ with _CreateService() as (methods, stub):
+ with wait():
+ responses = stub.HalfDuplexCall(half_duplex_request_iterator(),
+ test_constants.SHORT_TIMEOUT)
+ # half-duplex waits for the client to send all info
+ with self.assertRaises(face.ExpirationError):
+ next(responses)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
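
_ServicerMethods.pause() and fail() above are a small condition-variable gate that lets a test freeze or break the servicer on demand, which is how the expiration and failure cases are exercised. The same pattern reduced to a self-contained sketch with no gRPC involved; Gate, wait_if_paused and the timings are illustrative names only:

    import contextlib
    import threading
    import time

    class Gate(object):

        def __init__(self):
            self._condition = threading.Condition()
            self._paused = False

        @contextlib.contextmanager
        def pause(self):
            # While the with-block is active, wait_if_paused() blocks callers.
            with self._condition:
                self._paused = True
            yield
            with self._condition:
                self._paused = False
                self._condition.notify_all()

        def wait_if_paused(self):
            with self._condition:
                while self._paused:
                    self._condition.wait()

    if __name__ == '__main__':
        gate = Gate()
        done = []
        worker = threading.Thread(
            target=lambda: (gate.wait_if_paused(), done.append(True)))
        with gate.pause():
            worker.start()
            time.sleep(0.1)
            assert not done  # worker is parked inside wait_if_paused()
        worker.join()
        assert done          # released once pause() exited
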
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/protos/__init__.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/__init__.py
index 2f88fa0412..100a624dc9 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/__init__.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/__init__.py
index 2f88fa0412..100a624dc9 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/__init__.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/__init__.py
index 2f88fa0412..100a624dc9 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/__init__.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/payload/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/payload/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/protos/payload/__init__.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/payload/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/__init__.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/__init__.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/responses/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/responses/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/protos/responses/__init__.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/responses/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/service/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/service/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/protos/service/__init__.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/service/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/qps/benchmark_client.py b/src/python/grpcio_tests/tests/qps/benchmark_client.py
index 650e4756e7..2e8afc8e7f 100644
--- a/src/python/grpcio_tests/tests/qps/benchmark_client.py
+++ b/src/python/grpcio_tests/tests/qps/benchmark_client.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Defines test client behaviors (UNARY/STREAMING) (SYNC/ASYNC)."""
import abc
@@ -47,165 +46,168 @@ _TIMEOUT = 60 * 60 * 24
class GenericStub(object):
- def __init__(self, channel):
- self.UnaryCall = channel.unary_unary(
- '/grpc.testing.BenchmarkService/UnaryCall')
- self.StreamingCall = channel.stream_stream(
- '/grpc.testing.BenchmarkService/StreamingCall')
+ def __init__(self, channel):
+ self.UnaryCall = channel.unary_unary(
+ '/grpc.testing.BenchmarkService/UnaryCall')
+ self.StreamingCall = channel.stream_stream(
+ '/grpc.testing.BenchmarkService/StreamingCall')
class BenchmarkClient:
- """Benchmark client interface that exposes a non-blocking send_request()."""
-
- __metaclass__ = abc.ABCMeta
-
- def __init__(self, server, config, hist):
- # Create the stub
- if config.HasField('security_params'):
- creds = grpc.ssl_channel_credentials(resources.test_root_certificates())
- channel = test_common.test_secure_channel(
- server, creds, config.security_params.server_host_override)
- else:
- channel = grpc.insecure_channel(server)
-
- # waits for the channel to be ready before we start sending messages
- grpc.channel_ready_future(channel).result()
-
- if config.payload_config.WhichOneof('payload') == 'simple_params':
- self._generic = False
- self._stub = services_pb2.BenchmarkServiceStub(channel)
- payload = messages_pb2.Payload(
- body='\0' * config.payload_config.simple_params.req_size)
- self._request = messages_pb2.SimpleRequest(
- payload=payload,
- response_size=config.payload_config.simple_params.resp_size)
- else:
- self._generic = True
- self._stub = GenericStub(channel)
- self._request = '\0' * config.payload_config.bytebuf_params.req_size
-
- self._hist = hist
- self._response_callbacks = []
-
- def add_response_callback(self, callback):
- """callback will be invoked as callback(client, query_time)"""
- self._response_callbacks.append(callback)
-
- @abc.abstractmethod
- def send_request(self):
- """Non-blocking wrapper for a client's request operation."""
- raise NotImplementedError()
-
- def start(self):
- pass
-
- def stop(self):
- pass
-
- def _handle_response(self, client, query_time):
- self._hist.add(query_time * 1e9) # Report times in nanoseconds
- for callback in self._response_callbacks:
- callback(client, query_time)
+ """Benchmark client interface that exposes a non-blocking send_request()."""
+
+ __metaclass__ = abc.ABCMeta
+
+ def __init__(self, server, config, hist):
+ # Create the stub
+ if config.HasField('security_params'):
+ creds = grpc.ssl_channel_credentials(
+ resources.test_root_certificates())
+ channel = test_common.test_secure_channel(
+ server, creds, config.security_params.server_host_override)
+ else:
+ channel = grpc.insecure_channel(server)
+
+ # waits for the channel to be ready before we start sending messages
+ grpc.channel_ready_future(channel).result()
+
+ if config.payload_config.WhichOneof('payload') == 'simple_params':
+ self._generic = False
+ self._stub = services_pb2.BenchmarkServiceStub(channel)
+ payload = messages_pb2.Payload(
+ body='\0' * config.payload_config.simple_params.req_size)
+ self._request = messages_pb2.SimpleRequest(
+ payload=payload,
+ response_size=config.payload_config.simple_params.resp_size)
+ else:
+ self._generic = True
+ self._stub = GenericStub(channel)
+ self._request = '\0' * config.payload_config.bytebuf_params.req_size
+
+ self._hist = hist
+ self._response_callbacks = []
+
+ def add_response_callback(self, callback):
+ """callback will be invoked as callback(client, query_time)"""
+ self._response_callbacks.append(callback)
+
+ @abc.abstractmethod
+ def send_request(self):
+ """Non-blocking wrapper for a client's request operation."""
+ raise NotImplementedError()
+
+ def start(self):
+ pass
+
+ def stop(self):
+ pass
+
+ def _handle_response(self, client, query_time):
+ self._hist.add(query_time * 1e9) # Report times in nanoseconds
+ for callback in self._response_callbacks:
+ callback(client, query_time)
class UnarySyncBenchmarkClient(BenchmarkClient):
- def __init__(self, server, config, hist):
- super(UnarySyncBenchmarkClient, self).__init__(server, config, hist)
- self._pool = futures.ThreadPoolExecutor(
- max_workers=config.outstanding_rpcs_per_channel)
+ def __init__(self, server, config, hist):
+ super(UnarySyncBenchmarkClient, self).__init__(server, config, hist)
+ self._pool = futures.ThreadPoolExecutor(
+ max_workers=config.outstanding_rpcs_per_channel)
- def send_request(self):
- # Send requests in seperate threads to support multiple outstanding rpcs
- # (See src/proto/grpc/testing/control.proto)
- self._pool.submit(self._dispatch_request)
+ def send_request(self):
+        # Send requests in separate threads to support multiple outstanding rpcs
+ # (See src/proto/grpc/testing/control.proto)
+ self._pool.submit(self._dispatch_request)
- def stop(self):
- self._pool.shutdown(wait=True)
- self._stub = None
+ def stop(self):
+ self._pool.shutdown(wait=True)
+ self._stub = None
- def _dispatch_request(self):
- start_time = time.time()
- self._stub.UnaryCall(self._request, _TIMEOUT)
- end_time = time.time()
- self._handle_response(self, end_time - start_time)
+ def _dispatch_request(self):
+ start_time = time.time()
+ self._stub.UnaryCall(self._request, _TIMEOUT)
+ end_time = time.time()
+ self._handle_response(self, end_time - start_time)
class UnaryAsyncBenchmarkClient(BenchmarkClient):
- def send_request(self):
- # Use the Future callback api to support multiple outstanding rpcs
- start_time = time.time()
- response_future = self._stub.UnaryCall.future(self._request, _TIMEOUT)
- response_future.add_done_callback(
- lambda resp: self._response_received(start_time, resp))
+ def send_request(self):
+ # Use the Future callback api to support multiple outstanding rpcs
+ start_time = time.time()
+ response_future = self._stub.UnaryCall.future(self._request, _TIMEOUT)
+ response_future.add_done_callback(
+ lambda resp: self._response_received(start_time, resp))
- def _response_received(self, start_time, resp):
- resp.result()
- end_time = time.time()
- self._handle_response(self, end_time - start_time)
+ def _response_received(self, start_time, resp):
+ resp.result()
+ end_time = time.time()
+ self._handle_response(self, end_time - start_time)
- def stop(self):
- self._stub = None
+ def stop(self):
+ self._stub = None
class _SyncStream(object):
- def __init__(self, stub, generic, request, handle_response):
- self._stub = stub
- self._generic = generic
- self._request = request
- self._handle_response = handle_response
- self._is_streaming = False
- self._request_queue = queue.Queue()
- self._send_time_queue = queue.Queue()
-
- def send_request(self):
- self._send_time_queue.put(time.time())
- self._request_queue.put(self._request)
-
- def start(self):
- self._is_streaming = True
- response_stream = self._stub.StreamingCall(
- self._request_generator(), _TIMEOUT)
- for _ in response_stream:
- self._handle_response(
- self, time.time() - self._send_time_queue.get_nowait())
-
- def stop(self):
- self._is_streaming = False
-
- def _request_generator(self):
- while self._is_streaming:
- try:
- request = self._request_queue.get(block=True, timeout=1.0)
- yield request
- except queue.Empty:
- pass
+ def __init__(self, stub, generic, request, handle_response):
+ self._stub = stub
+ self._generic = generic
+ self._request = request
+ self._handle_response = handle_response
+ self._is_streaming = False
+ self._request_queue = queue.Queue()
+ self._send_time_queue = queue.Queue()
+
+ def send_request(self):
+ self._send_time_queue.put(time.time())
+ self._request_queue.put(self._request)
+
+ def start(self):
+ self._is_streaming = True
+ response_stream = self._stub.StreamingCall(self._request_generator(),
+ _TIMEOUT)
+ for _ in response_stream:
+ self._handle_response(
+ self, time.time() - self._send_time_queue.get_nowait())
+
+ def stop(self):
+ self._is_streaming = False
+
+ def _request_generator(self):
+ while self._is_streaming:
+ try:
+ request = self._request_queue.get(block=True, timeout=1.0)
+ yield request
+ except queue.Empty:
+ pass
class StreamingSyncBenchmarkClient(BenchmarkClient):
- def __init__(self, server, config, hist):
- super(StreamingSyncBenchmarkClient, self).__init__(server, config, hist)
- self._pool = futures.ThreadPoolExecutor(
- max_workers=config.outstanding_rpcs_per_channel)
- self._streams = [_SyncStream(self._stub, self._generic,
- self._request, self._handle_response)
- for _ in xrange(config.outstanding_rpcs_per_channel)]
- self._curr_stream = 0
-
- def send_request(self):
- # Use a round_robin scheduler to determine what stream to send on
- self._streams[self._curr_stream].send_request()
- self._curr_stream = (self._curr_stream + 1) % len(self._streams)
-
- def start(self):
- for stream in self._streams:
- self._pool.submit(stream.start)
-
- def stop(self):
- for stream in self._streams:
- stream.stop()
- self._pool.shutdown(wait=True)
- self._stub = None
+ def __init__(self, server, config, hist):
+ super(StreamingSyncBenchmarkClient, self).__init__(server, config, hist)
+ self._pool = futures.ThreadPoolExecutor(
+ max_workers=config.outstanding_rpcs_per_channel)
+ self._streams = [
+ _SyncStream(self._stub, self._generic, self._request,
+ self._handle_response)
+ for _ in xrange(config.outstanding_rpcs_per_channel)
+ ]
+ self._curr_stream = 0
+
+ def send_request(self):
+ # Use a round_robin scheduler to determine what stream to send on
+ self._streams[self._curr_stream].send_request()
+ self._curr_stream = (self._curr_stream + 1) % len(self._streams)
+
+ def start(self):
+ for stream in self._streams:
+ self._pool.submit(stream.start)
+
+ def stop(self):
+ for stream in self._streams:
+ stream.stop()
+ self._pool.shutdown(wait=True)
+ self._stub = None
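
Each concrete client above honours the same contract: send_request() returns immediately, and every completed RPC invokes callback(client, query_time) with the latency in seconds, which _handle_response then records in nanoseconds. A rough sketch of that contract using a stand-in client instead of a live channel; FakeBenchmarkClient and its sleep-based latency are invented for illustration:

    import threading
    import time

    class FakeBenchmarkClient(object):
        """Stand-in with the same surface: non-blocking send_request() plus
        callback(client, query_time) on completion."""

        def __init__(self):
            self._response_callbacks = []

        def add_response_callback(self, callback):
            self._response_callbacks.append(callback)

        def send_request(self):
            def _work():
                start = time.time()
                time.sleep(0.01)  # pretend RPC latency
                elapsed = time.time() - start
                for callback in self._response_callbacks:
                    callback(self, elapsed)
            # Returns immediately, like the real clients above.
            threading.Thread(target=_work).start()

    if __name__ == '__main__':
        latencies = []
        client = FakeBenchmarkClient()
        # Record in nanoseconds, mirroring _handle_response above.
        client.add_response_callback(lambda c, q: latencies.append(q * 1e9))
        for _ in range(3):
            client.send_request()
        time.sleep(0.1)
        print(len(latencies), 'responses recorded')
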
diff --git a/src/python/grpcio_tests/tests/qps/benchmark_server.py b/src/python/grpcio_tests/tests/qps/benchmark_server.py
index 2b76b810cd..423d03b804 100644
--- a/src/python/grpcio_tests/tests/qps/benchmark_server.py
+++ b/src/python/grpcio_tests/tests/qps/benchmark_server.py
@@ -32,27 +32,27 @@ from src.proto.grpc.testing import services_pb2
class BenchmarkServer(services_pb2.BenchmarkServiceServicer):
- """Synchronous Server implementation for the Benchmark service."""
+ """Synchronous Server implementation for the Benchmark service."""
- def UnaryCall(self, request, context):
- payload = messages_pb2.Payload(body='\0' * request.response_size)
- return messages_pb2.SimpleResponse(payload=payload)
+ def UnaryCall(self, request, context):
+ payload = messages_pb2.Payload(body='\0' * request.response_size)
+ return messages_pb2.SimpleResponse(payload=payload)
- def StreamingCall(self, request_iterator, context):
- for request in request_iterator:
- payload = messages_pb2.Payload(body='\0' * request.response_size)
- yield messages_pb2.SimpleResponse(payload=payload)
+ def StreamingCall(self, request_iterator, context):
+ for request in request_iterator:
+ payload = messages_pb2.Payload(body='\0' * request.response_size)
+ yield messages_pb2.SimpleResponse(payload=payload)
class GenericBenchmarkServer(services_pb2.BenchmarkServiceServicer):
- """Generic Server implementation for the Benchmark service."""
+ """Generic Server implementation for the Benchmark service."""
- def __init__(self, resp_size):
- self._response = '\0' * resp_size
+ def __init__(self, resp_size):
+ self._response = '\0' * resp_size
- def UnaryCall(self, request, context):
- return self._response
+ def UnaryCall(self, request, context):
+ return self._response
- def StreamingCall(self, request_iterator, context):
- for request in request_iterator:
- yield self._response
+ def StreamingCall(self, request_iterator, context):
+ for request in request_iterator:
+ yield self._response
diff --git a/src/python/grpcio_tests/tests/qps/client_runner.py b/src/python/grpcio_tests/tests/qps/client_runner.py
index 1fd58687ad..037092313c 100644
--- a/src/python/grpcio_tests/tests/qps/client_runner.py
+++ b/src/python/grpcio_tests/tests/qps/client_runner.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Defines behavior for WHEN clients send requests.
Each client exposes a non-blocking send_request() method that the
@@ -39,68 +38,68 @@ import time
class ClientRunner:
- """Abstract interface for sending requests from clients."""
+ """Abstract interface for sending requests from clients."""
- __metaclass__ = abc.ABCMeta
+ __metaclass__ = abc.ABCMeta
- def __init__(self, client):
- self._client = client
+ def __init__(self, client):
+ self._client = client
- @abc.abstractmethod
- def start(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def start(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def stop(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def stop(self):
+ raise NotImplementedError()
class OpenLoopClientRunner(ClientRunner):
- def __init__(self, client, interval_generator):
- super(OpenLoopClientRunner, self).__init__(client)
- self._is_running = False
- self._interval_generator = interval_generator
- self._dispatch_thread = threading.Thread(
- target=self._dispatch_requests, args=())
-
- def start(self):
- self._is_running = True
- self._client.start()
- self._dispatch_thread.start()
-
- def stop(self):
- self._is_running = False
- self._client.stop()
- self._dispatch_thread.join()
- self._client = None
-
- def _dispatch_requests(self):
- while self._is_running:
- self._client.send_request()
- time.sleep(next(self._interval_generator))
+ def __init__(self, client, interval_generator):
+ super(OpenLoopClientRunner, self).__init__(client)
+ self._is_running = False
+ self._interval_generator = interval_generator
+ self._dispatch_thread = threading.Thread(
+ target=self._dispatch_requests, args=())
+
+ def start(self):
+ self._is_running = True
+ self._client.start()
+ self._dispatch_thread.start()
+
+ def stop(self):
+ self._is_running = False
+ self._client.stop()
+ self._dispatch_thread.join()
+ self._client = None
+
+ def _dispatch_requests(self):
+ while self._is_running:
+ self._client.send_request()
+ time.sleep(next(self._interval_generator))
class ClosedLoopClientRunner(ClientRunner):
- def __init__(self, client, request_count):
- super(ClosedLoopClientRunner, self).__init__(client)
- self._is_running = False
- self._request_count = request_count
- # Send a new request on each response for closed loop
- self._client.add_response_callback(self._send_request)
-
- def start(self):
- self._is_running = True
- self._client.start()
- for _ in xrange(self._request_count):
- self._client.send_request()
-
- def stop(self):
- self._is_running = False
- self._client.stop()
- self._client = None
-
- def _send_request(self, client, response_time):
- if self._is_running:
- client.send_request()
+ def __init__(self, client, request_count):
+ super(ClosedLoopClientRunner, self).__init__(client)
+ self._is_running = False
+ self._request_count = request_count
+ # Send a new request on each response for closed loop
+ self._client.add_response_callback(self._send_request)
+
+ def start(self):
+ self._is_running = True
+ self._client.start()
+ for _ in xrange(self._request_count):
+ self._client.send_request()
+
+ def stop(self):
+ self._is_running = False
+ self._client.stop()
+ self._client = None
+
+ def _send_request(self, client, response_time):
+ if self._is_running:
+ client.send_request()
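
OpenLoopClientRunner above paces itself by sleeping next(interval_generator) between sends, independently of responses. One plausible generator is exponentially distributed gaps, which produces Poisson arrivals at a target rate; the actual generator is constructed elsewhere in the suite, so treat this as an illustrative assumption:

    import random

    def poisson_intervals(offered_load_qps):
        # Exponentially distributed gaps: mean interval is 1 / offered_load_qps.
        while True:
            yield random.expovariate(offered_load_qps)

    if __name__ == '__main__':
        gen = poisson_intervals(100.0)
        print([round(next(gen), 4) for _ in range(5)])  # gaps averaging ~0.01s
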
diff --git a/src/python/grpcio_tests/tests/qps/histogram.py b/src/python/grpcio_tests/tests/qps/histogram.py
index 9a7b5eb2ba..61040b6f3b 100644
--- a/src/python/grpcio_tests/tests/qps/histogram.py
+++ b/src/python/grpcio_tests/tests/qps/histogram.py
@@ -34,52 +34,52 @@ from src.proto.grpc.testing import stats_pb2
class Histogram(object):
- """Histogram class used for recording performance testing data.
+ """Histogram class used for recording performance testing data.
This class is thread safe.
"""
- def __init__(self, resolution, max_possible):
- self._lock = threading.Lock()
- self._resolution = resolution
- self._max_possible = max_possible
- self._sum = 0
- self._sum_of_squares = 0
- self.multiplier = 1.0 + self._resolution
- self._count = 0
- self._min = self._max_possible
- self._max = 0
- self._buckets = [0] * (self._bucket_for(self._max_possible) + 1)
+ def __init__(self, resolution, max_possible):
+ self._lock = threading.Lock()
+ self._resolution = resolution
+ self._max_possible = max_possible
+ self._sum = 0
+ self._sum_of_squares = 0
+ self.multiplier = 1.0 + self._resolution
+ self._count = 0
+ self._min = self._max_possible
+ self._max = 0
+ self._buckets = [0] * (self._bucket_for(self._max_possible) + 1)
- def reset(self):
- with self._lock:
- self._sum = 0
- self._sum_of_squares = 0
- self._count = 0
- self._min = self._max_possible
- self._max = 0
- self._buckets = [0] * (self._bucket_for(self._max_possible) + 1)
+ def reset(self):
+ with self._lock:
+ self._sum = 0
+ self._sum_of_squares = 0
+ self._count = 0
+ self._min = self._max_possible
+ self._max = 0
+ self._buckets = [0] * (self._bucket_for(self._max_possible) + 1)
- def add(self, val):
- with self._lock:
- self._sum += val
- self._sum_of_squares += val * val
- self._count += 1
- self._min = min(self._min, val)
- self._max = max(self._max, val)
- self._buckets[self._bucket_for(val)] += 1
+ def add(self, val):
+ with self._lock:
+ self._sum += val
+ self._sum_of_squares += val * val
+ self._count += 1
+ self._min = min(self._min, val)
+ self._max = max(self._max, val)
+ self._buckets[self._bucket_for(val)] += 1
- def get_data(self):
- with self._lock:
- data = stats_pb2.HistogramData()
- data.bucket.extend(self._buckets)
- data.min_seen = self._min
- data.max_seen = self._max
- data.sum = self._sum
- data.sum_of_squares = self._sum_of_squares
- data.count = self._count
- return data
+ def get_data(self):
+ with self._lock:
+ data = stats_pb2.HistogramData()
+ data.bucket.extend(self._buckets)
+ data.min_seen = self._min
+ data.max_seen = self._max
+ data.sum = self._sum
+ data.sum_of_squares = self._sum_of_squares
+ data.count = self._count
+ return data
- def _bucket_for(self, val):
- val = min(val, self._max_possible)
- return int(math.log(val, self.multiplier))
+ def _bucket_for(self, val):
+ val = min(val, self._max_possible)
+ return int(math.log(val, self.multiplier))
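
Histogram._bucket_for above maps a value to int(log(val, 1 + resolution)), i.e. exponentially sized buckets whose width tracks the requested relative resolution. A small worked sketch of that mapping; the resolution and max_possible values are illustrative, not the driver's actual parameters:

    import math

    resolution = 0.01
    max_possible = 60e9
    multiplier = 1.0 + resolution

    def bucket_for(val):
        # Same mapping as Histogram._bucket_for: each bucket covers a span
        # roughly `resolution` wide in relative terms.
        return int(math.log(min(val, max_possible), multiplier))

    if __name__ == '__main__':
        print(bucket_for(1000))         # ~694
        print(bucket_for(1000 * 1.01))  # next bucket up: 695
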
diff --git a/src/python/grpcio_tests/tests/qps/qps_worker.py b/src/python/grpcio_tests/tests/qps/qps_worker.py
index 2371ff0956..025dfb9d4a 100644
--- a/src/python/grpcio_tests/tests/qps/qps_worker.py
+++ b/src/python/grpcio_tests/tests/qps/qps_worker.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""The entry point for the qps worker."""
import argparse
@@ -40,22 +39,23 @@ from tests.qps import worker_server
def run_worker_server(port):
- server = grpc.server(futures.ThreadPoolExecutor(max_workers=5))
- servicer = worker_server.WorkerServer()
- services_pb2.add_WorkerServiceServicer_to_server(servicer, server)
- server.add_insecure_port('[::]:{}'.format(port))
- server.start()
- servicer.wait_for_quit()
- server.stop(0)
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers=5))
+ servicer = worker_server.WorkerServer()
+ services_pb2.add_WorkerServiceServicer_to_server(servicer, server)
+ server.add_insecure_port('[::]:{}'.format(port))
+ server.start()
+ servicer.wait_for_quit()
+ server.stop(0)
if __name__ == '__main__':
- parser = argparse.ArgumentParser(
- description='gRPC Python performance testing worker')
- parser.add_argument('--driver_port',
- type=int,
- dest='port',
- help='The port the worker should listen on')
- args = parser.parse_args()
-
- run_worker_server(args.port)
+ parser = argparse.ArgumentParser(
+ description='gRPC Python performance testing worker')
+ parser.add_argument(
+ '--driver_port',
+ type=int,
+ dest='port',
+ help='The port the worker should listen on')
+ args = parser.parse_args()
+
+ run_worker_server(args.port)
diff --git a/src/python/grpcio_tests/tests/qps/worker_server.py b/src/python/grpcio_tests/tests/qps/worker_server.py
index 46d542940f..1deb7ed698 100644
--- a/src/python/grpcio_tests/tests/qps/worker_server.py
+++ b/src/python/grpcio_tests/tests/qps/worker_server.py
@@ -46,149 +46,156 @@ from tests.unit import resources
class WorkerServer(services_pb2.WorkerServiceServicer):
- """Python Worker Server implementation."""
-
- def __init__(self):
- self._quit_event = threading.Event()
-
- def RunServer(self, request_iterator, context):
- config = next(request_iterator).setup
- server, port = self._create_server(config)
- cores = multiprocessing.cpu_count()
- server.start()
- start_time = time.time()
- yield self._get_server_status(start_time, start_time, port, cores)
-
- for request in request_iterator:
- end_time = time.time()
- status = self._get_server_status(start_time, end_time, port, cores)
- if request.mark.reset:
- start_time = end_time
- yield status
- server.stop(None)
-
- def _get_server_status(self, start_time, end_time, port, cores):
- end_time = time.time()
- elapsed_time = end_time - start_time
- stats = stats_pb2.ServerStats(time_elapsed=elapsed_time,
- time_user=elapsed_time,
- time_system=elapsed_time)
- return control_pb2.ServerStatus(stats=stats, port=port, cores=cores)
-
- def _create_server(self, config):
- if config.async_server_threads == 0:
- # This is the default concurrent.futures thread pool size, but
- # None doesn't seem to work
- server_threads = multiprocessing.cpu_count() * 5
- else:
- server_threads = config.async_server_threads
- server = grpc.server(futures.ThreadPoolExecutor(
- max_workers=server_threads))
- if config.server_type == control_pb2.ASYNC_SERVER:
- servicer = benchmark_server.BenchmarkServer()
- services_pb2.add_BenchmarkServiceServicer_to_server(servicer, server)
- elif config.server_type == control_pb2.ASYNC_GENERIC_SERVER:
- resp_size = config.payload_config.bytebuf_params.resp_size
- servicer = benchmark_server.GenericBenchmarkServer(resp_size)
- method_implementations = {
- 'StreamingCall':
- grpc.stream_stream_rpc_method_handler(servicer.StreamingCall),
- 'UnaryCall':
- grpc.unary_unary_rpc_method_handler(servicer.UnaryCall),
- }
- handler = grpc.method_handlers_generic_handler(
- 'grpc.testing.BenchmarkService', method_implementations)
- server.add_generic_rpc_handlers((handler,))
- else:
- raise Exception('Unsupported server type {}'.format(config.server_type))
-
- if config.HasField('security_params'): # Use SSL
- server_creds = grpc.ssl_server_credentials(
- ((resources.private_key(), resources.certificate_chain()),))
- port = server.add_secure_port('[::]:{}'.format(config.port), server_creds)
- else:
- port = server.add_insecure_port('[::]:{}'.format(config.port))
-
- return (server, port)
-
- def RunClient(self, request_iterator, context):
- config = next(request_iterator).setup
- client_runners = []
- qps_data = histogram.Histogram(config.histogram_params.resolution,
- config.histogram_params.max_possible)
- start_time = time.time()
-
- # Create a client for each channel
- for i in xrange(config.client_channels):
- server = config.server_targets[i % len(config.server_targets)]
- runner = self._create_client_runner(server, config, qps_data)
- client_runners.append(runner)
- runner.start()
-
- end_time = time.time()
- yield self._get_client_status(start_time, end_time, qps_data)
-
- # Respond to stat requests
- for request in request_iterator:
- end_time = time.time()
- status = self._get_client_status(start_time, end_time, qps_data)
- if request.mark.reset:
- qps_data.reset()
+ """Python Worker Server implementation."""
+
+ def __init__(self):
+ self._quit_event = threading.Event()
+
+ def RunServer(self, request_iterator, context):
+ config = next(request_iterator).setup
+ server, port = self._create_server(config)
+ cores = multiprocessing.cpu_count()
+ server.start()
start_time = time.time()
- yield status
-
- # Cleanup the clients
- for runner in client_runners:
- runner.stop()
-
- def _get_client_status(self, start_time, end_time, qps_data):
- latencies = qps_data.get_data()
- end_time = time.time()
- elapsed_time = end_time - start_time
- stats = stats_pb2.ClientStats(latencies=latencies,
- time_elapsed=elapsed_time,
- time_user=elapsed_time,
- time_system=elapsed_time)
- return control_pb2.ClientStatus(stats=stats)
-
- def _create_client_runner(self, server, config, qps_data):
- if config.client_type == control_pb2.SYNC_CLIENT:
- if config.rpc_type == control_pb2.UNARY:
- client = benchmark_client.UnarySyncBenchmarkClient(
- server, config, qps_data)
- elif config.rpc_type == control_pb2.STREAMING:
- client = benchmark_client.StreamingSyncBenchmarkClient(
- server, config, qps_data)
- elif config.client_type == control_pb2.ASYNC_CLIENT:
- if config.rpc_type == control_pb2.UNARY:
- client = benchmark_client.UnaryAsyncBenchmarkClient(
- server, config, qps_data)
- else:
- raise Exception('Async streaming client not supported')
- else:
- raise Exception('Unsupported client type {}'.format(config.client_type))
-
- # In multi-channel tests, we split the load across all channels
- load_factor = float(config.client_channels)
- if config.load_params.WhichOneof('load') == 'closed_loop':
- runner = client_runner.ClosedLoopClientRunner(
- client, config.outstanding_rpcs_per_channel)
- else: # Open loop Poisson
- alpha = config.load_params.poisson.offered_load / load_factor
- def poisson():
- while True:
- yield random.expovariate(alpha)
-
- runner = client_runner.OpenLoopClientRunner(client, poisson())
-
- return runner
-
- def CoreCount(self, request, context):
- return control_pb2.CoreResponse(cores=multiprocessing.cpu_count())
-
- def QuitWorker(self, request, context):
- self._quit_event.set()
- return control_pb2.Void()
-
- def wait_for_quit(self):
- self._quit_event.wait()
+ yield self._get_server_status(start_time, start_time, port, cores)
+
+ for request in request_iterator:
+ end_time = time.time()
+ status = self._get_server_status(start_time, end_time, port, cores)
+ if request.mark.reset:
+ start_time = end_time
+ yield status
+ server.stop(None)
+
+ def _get_server_status(self, start_time, end_time, port, cores):
+ end_time = time.time()
+ elapsed_time = end_time - start_time
+ stats = stats_pb2.ServerStats(
+ time_elapsed=elapsed_time,
+ time_user=elapsed_time,
+ time_system=elapsed_time)
+ return control_pb2.ServerStatus(stats=stats, port=port, cores=cores)
+
+ def _create_server(self, config):
+ if config.async_server_threads == 0:
+ # This is the default concurrent.futures thread pool size, but
+ # None doesn't seem to work
+ server_threads = multiprocessing.cpu_count() * 5
+ else:
+ server_threads = config.async_server_threads
+ server = grpc.server(
+ futures.ThreadPoolExecutor(max_workers=server_threads))
+ if config.server_type == control_pb2.ASYNC_SERVER:
+ servicer = benchmark_server.BenchmarkServer()
+ services_pb2.add_BenchmarkServiceServicer_to_server(servicer,
+ server)
+ elif config.server_type == control_pb2.ASYNC_GENERIC_SERVER:
+ resp_size = config.payload_config.bytebuf_params.resp_size
+ servicer = benchmark_server.GenericBenchmarkServer(resp_size)
+ method_implementations = {
+ 'StreamingCall':
+ grpc.stream_stream_rpc_method_handler(servicer.StreamingCall),
+ 'UnaryCall':
+ grpc.unary_unary_rpc_method_handler(servicer.UnaryCall),
+ }
+ handler = grpc.method_handlers_generic_handler(
+ 'grpc.testing.BenchmarkService', method_implementations)
+ server.add_generic_rpc_handlers((handler,))
+ else:
+ raise Exception('Unsupported server type {}'.format(
+ config.server_type))
+
+ if config.HasField('security_params'): # Use SSL
+ server_creds = grpc.ssl_server_credentials((
+ (resources.private_key(), resources.certificate_chain()),))
+ port = server.add_secure_port('[::]:{}'.format(config.port),
+ server_creds)
+ else:
+ port = server.add_insecure_port('[::]:{}'.format(config.port))
+
+ return (server, port)
+
+ def RunClient(self, request_iterator, context):
+ config = next(request_iterator).setup
+ client_runners = []
+ qps_data = histogram.Histogram(config.histogram_params.resolution,
+ config.histogram_params.max_possible)
+ start_time = time.time()
+
+ # Create a client for each channel
+ for i in xrange(config.client_channels):
+ server = config.server_targets[i % len(config.server_targets)]
+ runner = self._create_client_runner(server, config, qps_data)
+ client_runners.append(runner)
+ runner.start()
+
+ end_time = time.time()
+ yield self._get_client_status(start_time, end_time, qps_data)
+
+ # Respond to stat requests
+ for request in request_iterator:
+ end_time = time.time()
+ status = self._get_client_status(start_time, end_time, qps_data)
+ if request.mark.reset:
+ qps_data.reset()
+ start_time = time.time()
+ yield status
+
+ # Cleanup the clients
+ for runner in client_runners:
+ runner.stop()
+
+ def _get_client_status(self, start_time, end_time, qps_data):
+ latencies = qps_data.get_data()
+ end_time = time.time()
+ elapsed_time = end_time - start_time
+ stats = stats_pb2.ClientStats(
+ latencies=latencies,
+ time_elapsed=elapsed_time,
+ time_user=elapsed_time,
+ time_system=elapsed_time)
+ return control_pb2.ClientStatus(stats=stats)
+
+ def _create_client_runner(self, server, config, qps_data):
+ if config.client_type == control_pb2.SYNC_CLIENT:
+ if config.rpc_type == control_pb2.UNARY:
+ client = benchmark_client.UnarySyncBenchmarkClient(
+ server, config, qps_data)
+ elif config.rpc_type == control_pb2.STREAMING:
+ client = benchmark_client.StreamingSyncBenchmarkClient(
+ server, config, qps_data)
+ elif config.client_type == control_pb2.ASYNC_CLIENT:
+ if config.rpc_type == control_pb2.UNARY:
+ client = benchmark_client.UnaryAsyncBenchmarkClient(
+ server, config, qps_data)
+ else:
+ raise Exception('Async streaming client not supported')
+ else:
+ raise Exception('Unsupported client type {}'.format(
+ config.client_type))
+
+ # In multi-channel tests, we split the load across all channels
+ load_factor = float(config.client_channels)
+ if config.load_params.WhichOneof('load') == 'closed_loop':
+ runner = client_runner.ClosedLoopClientRunner(
+ client, config.outstanding_rpcs_per_channel)
+ else: # Open loop Poisson
+ alpha = config.load_params.poisson.offered_load / load_factor
+
+ def poisson():
+ while True:
+ yield random.expovariate(alpha)
+
+ runner = client_runner.OpenLoopClientRunner(client, poisson())
+
+ return runner
+
+ def CoreCount(self, request, context):
+ return control_pb2.CoreResponse(cores=multiprocessing.cpu_count())
+
+ def QuitWorker(self, request, context):
+ self._quit_event.set()
+ return control_pb2.Void()
+
+ def wait_for_quit(self):
+ self._quit_event.wait()
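
The open-loop branch of _create_client_runner above schedules sends with exponentially distributed gaps, i.e. a Poisson arrival process at rate alpha = offered_load / client_channels. A standalone sketch with illustrative numbers:

    import random

    offered_load = 1000.0     # illustrative: total offered RPCs per second
    client_channels = 4
    alpha = offered_load / client_channels

    def poisson():
        while True:
            yield random.expovariate(alpha)

    gaps = poisson()
    # seconds to wait before each of the next three sends on one channel
    print([next(gaps) for _ in range(3)])
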
diff --git a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
index 43d6c971b5..76e89ca039 100644
--- a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
+++ b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests of grpc_reflection.v1alpha.reflection."""
import unittest
@@ -45,141 +44,112 @@ from tests.unit.framework.common import test_constants
_EMPTY_PROTO_FILE_NAME = 'src/proto/grpc/testing/empty.proto'
_EMPTY_PROTO_SYMBOL_NAME = 'grpc.testing.Empty'
-_SERVICE_NAMES = (
- 'Angstrom', 'Bohr', 'Curie', 'Dyson', 'Einstein', 'Feynman', 'Galilei')
+_SERVICE_NAMES = ('Angstrom', 'Bohr', 'Curie', 'Dyson', 'Einstein', 'Feynman',
+ 'Galilei')
+
def _file_descriptor_to_proto(descriptor):
- proto = descriptor_pb2.FileDescriptorProto()
- descriptor.CopyToProto(proto)
- return proto.SerializeToString()
+ proto = descriptor_pb2.FileDescriptorProto()
+ descriptor.CopyToProto(proto)
+ return proto.SerializeToString()
+
class ReflectionServicerTest(unittest.TestCase):
- def setUp(self):
- servicer = reflection.ReflectionServicer(service_names=_SERVICE_NAMES)
- server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- self._server = grpc.server(server_pool)
- port = self._server.add_insecure_port('[::]:0')
- reflection_pb2.add_ServerReflectionServicer_to_server(servicer, self._server)
- self._server.start()
-
- channel = grpc.insecure_channel('localhost:%d' % port)
- self._stub = reflection_pb2.ServerReflectionStub(channel)
-
- def testFileByName(self):
- requests = (
- reflection_pb2.ServerReflectionRequest(
- file_by_filename=_EMPTY_PROTO_FILE_NAME
- ),
- reflection_pb2.ServerReflectionRequest(
- file_by_filename='i-donut-exist'
- ),
- )
- responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
- expected_responses = (
- reflection_pb2.ServerReflectionResponse(
- valid_host='',
- file_descriptor_response=reflection_pb2.FileDescriptorResponse(
- file_descriptor_proto=(
- _file_descriptor_to_proto(empty_pb2.DESCRIPTOR),
- )
- )
- ),
- reflection_pb2.ServerReflectionResponse(
- valid_host='',
- error_response=reflection_pb2.ErrorResponse(
- error_code=grpc.StatusCode.NOT_FOUND.value[0],
- error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
- )
- ),
- )
- self.assertSequenceEqual(expected_responses, responses)
-
- def testFileBySymbol(self):
- requests = (
- reflection_pb2.ServerReflectionRequest(
- file_containing_symbol=_EMPTY_PROTO_SYMBOL_NAME
- ),
- reflection_pb2.ServerReflectionRequest(
- file_containing_symbol='i.donut.exist.co.uk.org.net.me.name.foo'
- ),
- )
- responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
- expected_responses = (
- reflection_pb2.ServerReflectionResponse(
- valid_host='',
- file_descriptor_response=reflection_pb2.FileDescriptorResponse(
- file_descriptor_proto=(
- _file_descriptor_to_proto(empty_pb2.DESCRIPTOR),
- )
- )
- ),
- reflection_pb2.ServerReflectionResponse(
- valid_host='',
- error_response=reflection_pb2.ErrorResponse(
- error_code=grpc.StatusCode.NOT_FOUND.value[0],
- error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
- )
- ),
- )
- self.assertSequenceEqual(expected_responses, responses)
-
- @unittest.skip('TODO(atash): implement file-containing-extension reflection '
- '(see https://github.com/google/protobuf/issues/2248)')
- def testFileContainingExtension(self):
- requests = (
- reflection_pb2.ServerReflectionRequest(
- file_containing_extension=reflection_pb2.ExtensionRequest(
- containing_type='grpc.testing.proto2.Empty',
- extension_number=125,
- ),
- ),
- reflection_pb2.ServerReflectionRequest(
- file_containing_extension=reflection_pb2.ExtensionRequest(
- containing_type='i.donut.exist.co.uk.org.net.me.name.foo',
- extension_number=55,
- ),
- ),
- )
- responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
- expected_responses = (
- reflection_pb2.ServerReflectionResponse(
- valid_host='',
- file_descriptor_response=reflection_pb2.FileDescriptorResponse(
- file_descriptor_proto=(
- _file_descriptor_to_proto(empty_extensions_pb2.DESCRIPTOR),
- )
- )
- ),
- reflection_pb2.ServerReflectionResponse(
- valid_host='',
- error_response=reflection_pb2.ErrorResponse(
- error_code=grpc.StatusCode.NOT_FOUND.value[0],
- error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
- )
- ),
- )
- self.assertSequenceEqual(expected_responses, responses)
-
- def testListServices(self):
- requests = (
- reflection_pb2.ServerReflectionRequest(
- list_services='',
- ),
- )
- responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
- expected_responses = (
- reflection_pb2.ServerReflectionResponse(
- valid_host='',
- list_services_response=reflection_pb2.ListServiceResponse(
- service=tuple(
- reflection_pb2.ServiceResponse(name=name)
- for name in _SERVICE_NAMES
- )
- )
- ),
- )
- self.assertSequenceEqual(expected_responses, responses)
+ def setUp(self):
+ servicer = reflection.ReflectionServicer(service_names=_SERVICE_NAMES)
+ server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ self._server = grpc.server(server_pool)
+ port = self._server.add_insecure_port('[::]:0')
+ reflection_pb2.add_ServerReflectionServicer_to_server(servicer,
+ self._server)
+ self._server.start()
+
+ channel = grpc.insecure_channel('localhost:%d' % port)
+ self._stub = reflection_pb2.ServerReflectionStub(channel)
+
+ def testFileByName(self):
+ requests = (
+ reflection_pb2.ServerReflectionRequest(
+ file_by_filename=_EMPTY_PROTO_FILE_NAME),
+ reflection_pb2.ServerReflectionRequest(
+ file_by_filename='i-donut-exist'),)
+ responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
+ expected_responses = (
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ file_descriptor_response=reflection_pb2.FileDescriptorResponse(
+ file_descriptor_proto=(
+ _file_descriptor_to_proto(empty_pb2.DESCRIPTOR),))),
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.NOT_FOUND.value[0],
+ error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
+ )),)
+ self.assertSequenceEqual(expected_responses, responses)
+
+ def testFileBySymbol(self):
+ requests = (
+ reflection_pb2.ServerReflectionRequest(
+ file_containing_symbol=_EMPTY_PROTO_SYMBOL_NAME),
+ reflection_pb2.ServerReflectionRequest(
+ file_containing_symbol='i.donut.exist.co.uk.org.net.me.name.foo'
+ ),)
+ responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
+ expected_responses = (
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ file_descriptor_response=reflection_pb2.FileDescriptorResponse(
+ file_descriptor_proto=(
+ _file_descriptor_to_proto(empty_pb2.DESCRIPTOR),))),
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.NOT_FOUND.value[0],
+ error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
+ )),)
+ self.assertSequenceEqual(expected_responses, responses)
+
+ @unittest.skip(
+ 'TODO(atash): implement file-containing-extension reflection '
+ '(see https://github.com/google/protobuf/issues/2248)')
+ def testFileContainingExtension(self):
+ requests = (
+ reflection_pb2.ServerReflectionRequest(
+ file_containing_extension=reflection_pb2.ExtensionRequest(
+ containing_type='grpc.testing.proto2.Empty',
+ extension_number=125,),),
+ reflection_pb2.ServerReflectionRequest(
+ file_containing_extension=reflection_pb2.ExtensionRequest(
+ containing_type='i.donut.exist.co.uk.org.net.me.name.foo',
+ extension_number=55,),),)
+ responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
+ expected_responses = (
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ file_descriptor_response=reflection_pb2.FileDescriptorResponse(
+ file_descriptor_proto=(_file_descriptor_to_proto(
+ empty_extensions_pb2.DESCRIPTOR),))),
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.NOT_FOUND.value[0],
+ error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
+ )),)
+ self.assertSequenceEqual(expected_responses, responses)
+
+ def testListServices(self):
+ requests = (reflection_pb2.ServerReflectionRequest(list_services='',),)
+ responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
+ expected_responses = (reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ list_services_response=reflection_pb2.ListServiceResponse(
+ service=tuple(
+ reflection_pb2.ServiceResponse(name=name)
+ for name in _SERVICE_NAMES))),)
+ self.assertSequenceEqual(expected_responses, responses)
+
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
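
As a usage sketch of the API this test exercises (module paths follow the imports used by the test and are assumed here, and the target address is illustrative), a client can list a reflection-enabled server's services by streaming a single list_services request:

    import grpc
    from grpc_reflection.v1alpha import reflection_pb2

    channel = grpc.insecure_channel('localhost:50051')  # illustrative target
    stub = reflection_pb2.ServerReflectionStub(channel)
    requests = (reflection_pb2.ServerReflectionRequest(list_services=''),)
    for response in stub.ServerReflectionInfo(iter(requests)):
        for service in response.list_services_response.service:
            print(service.name)
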
diff --git a/src/python/grpcio_tests/tests/stress/client.py b/src/python/grpcio_tests/tests/stress/client.py
index b8116729b5..61f9e1c6b1 100644
--- a/src/python/grpcio_tests/tests/stress/client.py
+++ b/src/python/grpcio_tests/tests/stress/client.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Entry point for running stress tests."""
import argparse
@@ -46,118 +45,132 @@ from tests.stress import test_runner
def _args():
- parser = argparse.ArgumentParser(description='gRPC Python stress test client')
- parser.add_argument(
- '--server_addresses',
- help='comma separated list of hostname:port to run servers on',
- default='localhost:8080', type=str)
- parser.add_argument(
- '--test_cases',
- help='comma separated list of testcase:weighting of tests to run',
- default='large_unary:100',
- type=str)
- parser.add_argument(
- '--test_duration_secs',
- help='number of seconds to run the stress test',
- default=-1, type=int)
- parser.add_argument(
- '--num_channels_per_server',
- help='number of channels per server',
- default=1, type=int)
- parser.add_argument(
- '--num_stubs_per_channel',
- help='number of stubs to create per channel',
- default=1, type=int)
- parser.add_argument(
- '--metrics_port',
- help='the port to listen for metrics requests on',
- default=8081, type=int)
- parser.add_argument(
- '--use_test_ca',
- help='Whether to use our fake CA. Requires --use_tls=true',
- default=False, type=bool)
- parser.add_argument(
- '--use_tls',
- help='Whether to use TLS', default=False, type=bool)
- parser.add_argument(
- '--server_host_override', default="foo.test.google.fr",
- help='the server host to which to claim to connect', type=str)
- return parser.parse_args()
+ parser = argparse.ArgumentParser(
+ description='gRPC Python stress test client')
+ parser.add_argument(
+ '--server_addresses',
+ help='comma separated list of hostname:port to run servers on',
+ default='localhost:8080',
+ type=str)
+ parser.add_argument(
+ '--test_cases',
+ help='comma separated list of testcase:weighting of tests to run',
+ default='large_unary:100',
+ type=str)
+ parser.add_argument(
+ '--test_duration_secs',
+ help='number of seconds to run the stress test',
+ default=-1,
+ type=int)
+ parser.add_argument(
+ '--num_channels_per_server',
+ help='number of channels per server',
+ default=1,
+ type=int)
+ parser.add_argument(
+ '--num_stubs_per_channel',
+ help='number of stubs to create per channel',
+ default=1,
+ type=int)
+ parser.add_argument(
+ '--metrics_port',
+ help='the port to listen for metrics requests on',
+ default=8081,
+ type=int)
+ parser.add_argument(
+ '--use_test_ca',
+ help='Whether to use our fake CA. Requires --use_tls=true',
+ default=False,
+ type=bool)
+ parser.add_argument(
+ '--use_tls', help='Whether to use TLS', default=False, type=bool)
+ parser.add_argument(
+ '--server_host_override',
+ default="foo.test.google.fr",
+ help='the server host to which to claim to connect',
+ type=str)
+ return parser.parse_args()
def _test_case_from_arg(test_case_arg):
- for test_case in methods.TestCase:
- if test_case_arg == test_case.value:
- return test_case
- else:
- raise ValueError('No test case {}!'.format(test_case_arg))
+ for test_case in methods.TestCase:
+ if test_case_arg == test_case.value:
+ return test_case
+ else:
+ raise ValueError('No test case {}!'.format(test_case_arg))
def _parse_weighted_test_cases(test_case_args):
- weighted_test_cases = {}
- for test_case_arg in test_case_args.split(','):
- name, weight = test_case_arg.split(':', 1)
- test_case = _test_case_from_arg(name)
- weighted_test_cases[test_case] = int(weight)
- return weighted_test_cases
+ weighted_test_cases = {}
+ for test_case_arg in test_case_args.split(','):
+ name, weight = test_case_arg.split(':', 1)
+ test_case = _test_case_from_arg(name)
+ weighted_test_cases[test_case] = int(weight)
+ return weighted_test_cases
+
def _get_channel(target, args):
- if args.use_tls:
- if args.use_test_ca:
- root_certificates = resources.test_root_certificates()
+ if args.use_tls:
+ if args.use_test_ca:
+ root_certificates = resources.test_root_certificates()
+ else:
+ root_certificates = None # will load default roots.
+ channel_credentials = grpc.ssl_channel_credentials(
+ root_certificates=root_certificates)
+ options = ((
+ 'grpc.ssl_target_name_override',
+ args.server_host_override,),)
+ channel = grpc.secure_channel(
+ target, channel_credentials, options=options)
else:
- root_certificates = None # will load default roots.
- channel_credentials = grpc.ssl_channel_credentials(
- root_certificates=root_certificates)
- options = (('grpc.ssl_target_name_override', args.server_host_override,),)
- channel = grpc.secure_channel(target, channel_credentials, options=options)
- else:
- channel = grpc.insecure_channel(target)
-
- # waits for the channel to be ready before we start sending messages
- grpc.channel_ready_future(channel).result()
- return channel
+ channel = grpc.insecure_channel(target)
+
+ # waits for the channel to be ready before we start sending messages
+ grpc.channel_ready_future(channel).result()
+ return channel
+
def run_test(args):
- test_cases = _parse_weighted_test_cases(args.test_cases)
- test_server_targets = args.server_addresses.split(',')
- # Propagate any client exceptions with a queue
- exception_queue = queue.Queue()
- stop_event = threading.Event()
- hist = histogram.Histogram(1, 1)
- runners = []
-
- server = grpc.server(futures.ThreadPoolExecutor(max_workers=25))
- metrics_pb2.add_MetricsServiceServicer_to_server(
- metrics_server.MetricsServer(hist), server)
- server.add_insecure_port('[::]:{}'.format(args.metrics_port))
- server.start()
-
- for test_server_target in test_server_targets:
- for _ in xrange(args.num_channels_per_server):
- channel = _get_channel(test_server_target, args)
- for _ in xrange(args.num_stubs_per_channel):
- stub = test_pb2.TestServiceStub(channel)
- runner = test_runner.TestRunner(stub, test_cases, hist,
- exception_queue, stop_event)
- runners.append(runner)
-
- for runner in runners:
- runner.start()
- try:
- timeout_secs = args.test_duration_secs
- if timeout_secs < 0:
- timeout_secs = None
- raise exception_queue.get(block=True, timeout=timeout_secs)
- except queue.Empty:
- # No exceptions thrown, success
- pass
- finally:
- stop_event.set()
+ test_cases = _parse_weighted_test_cases(args.test_cases)
+ test_server_targets = args.server_addresses.split(',')
+ # Propagate any client exceptions with a queue
+ exception_queue = queue.Queue()
+ stop_event = threading.Event()
+ hist = histogram.Histogram(1, 1)
+ runners = []
+
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers=25))
+ metrics_pb2.add_MetricsServiceServicer_to_server(
+ metrics_server.MetricsServer(hist), server)
+ server.add_insecure_port('[::]:{}'.format(args.metrics_port))
+ server.start()
+
+ for test_server_target in test_server_targets:
+ for _ in xrange(args.num_channels_per_server):
+ channel = _get_channel(test_server_target, args)
+ for _ in xrange(args.num_stubs_per_channel):
+ stub = test_pb2.TestServiceStub(channel)
+ runner = test_runner.TestRunner(stub, test_cases, hist,
+ exception_queue, stop_event)
+ runners.append(runner)
+
for runner in runners:
- runner.join()
- runner = None
- server.stop(None)
+ runner.start()
+ try:
+ timeout_secs = args.test_duration_secs
+ if timeout_secs < 0:
+ timeout_secs = None
+ raise exception_queue.get(block=True, timeout=timeout_secs)
+ except queue.Empty:
+ # No exceptions thrown, success
+ pass
+ finally:
+ stop_event.set()
+ for runner in runners:
+ runner.join()
+ runner = None
+ server.stop(None)
+
if __name__ == '__main__':
- run_test(_args())
+ run_test(_args())
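
The --test_cases flag parsed above takes a comma-separated list of name:weight pairs. A standalone sketch of that parsing, with illustrative case names and without the methods.TestCase lookup:

    def parse_weighted(arg):
        weighted = {}
        for item in arg.split(','):
            name, weight = item.split(':', 1)
            weighted[name] = int(weight)
        return weighted

    print(parse_weighted('large_unary:70,empty_unary:30'))
    # {'large_unary': 70, 'empty_unary': 30}
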
diff --git a/src/python/grpcio_tests/tests/stress/metrics_server.py b/src/python/grpcio_tests/tests/stress/metrics_server.py
index 33dd1d6f2a..3a4cbc27ba 100644
--- a/src/python/grpcio_tests/tests/stress/metrics_server.py
+++ b/src/python/grpcio_tests/tests/stress/metrics_server.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""MetricsService for publishing stress test qps data."""
import time
@@ -38,23 +37,23 @@ GAUGE_NAME = 'python_overall_qps'
class MetricsServer(metrics_pb2.MetricsServiceServicer):
- def __init__(self, histogram):
- self._start_time = time.time()
- self._histogram = histogram
-
- def _get_qps(self):
- count = self._histogram.get_data().count
- delta = time.time() - self._start_time
- self._histogram.reset()
- self._start_time = time.time()
- return int(count/delta)
-
- def GetAllGauges(self, request, context):
- qps = self._get_qps()
- return [metrics_pb2.GaugeResponse(name=GAUGE_NAME, long_value=qps)]
-
- def GetGauge(self, request, context):
- if request.name != GAUGE_NAME:
- raise Exception('Gauge {} does not exist'.format(request.name))
- qps = self._get_qps()
- return metrics_pb2.GaugeResponse(name=GAUGE_NAME, long_value=qps)
+ def __init__(self, histogram):
+ self._start_time = time.time()
+ self._histogram = histogram
+
+ def _get_qps(self):
+ count = self._histogram.get_data().count
+ delta = time.time() - self._start_time
+ self._histogram.reset()
+ self._start_time = time.time()
+ return int(count / delta)
+
+ def GetAllGauges(self, request, context):
+ qps = self._get_qps()
+ return [metrics_pb2.GaugeResponse(name=GAUGE_NAME, long_value=qps)]
+
+ def GetGauge(self, request, context):
+ if request.name != GAUGE_NAME:
+ raise Exception('Gauge {} does not exist'.format(request.name))
+ qps = self._get_qps()
+ return metrics_pb2.GaugeResponse(name=GAUGE_NAME, long_value=qps)
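
The gauge arithmetic in _get_qps above is simply the calls recorded since the last reset divided by the elapsed seconds, e.g. with illustrative numbers:

    count, delta = 4500, 3.0      # 4500 calls observed over 3.0 seconds
    print(int(count / delta))     # reported gauge value: 1500 QPS
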
diff --git a/src/python/grpcio_tests/tests/stress/test_runner.py b/src/python/grpcio_tests/tests/stress/test_runner.py
index 88f13727e3..258abe9c21 100644
--- a/src/python/grpcio_tests/tests/stress/test_runner.py
+++ b/src/python/grpcio_tests/tests/stress/test_runner.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Thread that sends random weighted requests on a TestService stub."""
import random
@@ -36,38 +35,38 @@ import traceback
def _weighted_test_case_generator(weighted_cases):
- weight_sum = sum(weighted_cases.itervalues())
+ weight_sum = sum(weighted_cases.itervalues())
- while True:
- val = random.uniform(0, weight_sum)
- partial_sum = 0
- for case in weighted_cases:
- partial_sum += weighted_cases[case]
- if val <= partial_sum:
- yield case
- break
+ while True:
+ val = random.uniform(0, weight_sum)
+ partial_sum = 0
+ for case in weighted_cases:
+ partial_sum += weighted_cases[case]
+ if val <= partial_sum:
+ yield case
+ break
class TestRunner(threading.Thread):
- def __init__(self, stub, test_cases, hist, exception_queue, stop_event):
- super(TestRunner, self).__init__()
- self._exception_queue = exception_queue
- self._stop_event = stop_event
- self._stub = stub
- self._test_cases = _weighted_test_case_generator(test_cases)
- self._histogram = hist
+ def __init__(self, stub, test_cases, hist, exception_queue, stop_event):
+ super(TestRunner, self).__init__()
+ self._exception_queue = exception_queue
+ self._stop_event = stop_event
+ self._stub = stub
+ self._test_cases = _weighted_test_case_generator(test_cases)
+ self._histogram = hist
- def run(self):
- while not self._stop_event.is_set():
- try:
- test_case = next(self._test_cases)
- start_time = time.time()
- test_case.test_interoperability(self._stub, None)
- end_time = time.time()
- self._histogram.add((end_time - start_time)*1e9)
- except Exception as e:
- traceback.print_exc()
- self._exception_queue.put(
- Exception("An exception occured during test {}"
- .format(test_case), e))
+ def run(self):
+ while not self._stop_event.is_set():
+ try:
+ test_case = next(self._test_cases)
+ start_time = time.time()
+ test_case.test_interoperability(self._stub, None)
+ end_time = time.time()
+ self._histogram.add((end_time - start_time) * 1e9)
+ except Exception as e:
+ traceback.print_exc()
+ self._exception_queue.put(
+ Exception("An exception occured during test {}"
+ .format(test_case), e))
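
A standalone sketch of the weighted selection in _weighted_test_case_generator above, written against plain strings so it runs on its own (the original iterates enum members and uses Python 2's itervalues()):

    import random

    def weighted_generator(weighted_cases):
        weight_sum = sum(weighted_cases.values())
        while True:
            val = random.uniform(0, weight_sum)
            partial_sum = 0
            for case, weight in weighted_cases.items():
                partial_sum += weight
                if val <= partial_sum:
                    yield case
                    break

    gen = weighted_generator({'large_unary': 70, 'empty_unary': 30})
    print([next(gen) for _ in range(5)])  # roughly a 70/30 mix over many draws
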
diff --git a/src/python/grpcio_tests/tests/unit/__init__.py b/src/python/grpcio_tests/tests/unit/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/unit/__init__.py
+++ b/src/python/grpcio_tests/tests/unit/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/_api_test.py b/src/python/grpcio_tests/tests/unit/_api_test.py
index 51dc425420..5435c5500c 100644
--- a/src/python/grpcio_tests/tests/unit/_api_test.py
+++ b/src/python/grpcio_tests/tests/unit/_api_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Test of gRPC Python's application-layer API."""
import unittest
@@ -40,73 +39,71 @@ from tests.unit import _from_grpc_import_star
class AllTest(unittest.TestCase):
- def testAll(self):
- expected_grpc_code_elements = (
- 'FutureTimeoutError',
- 'FutureCancelledError',
- 'Future',
- 'ChannelConnectivity',
- 'StatusCode',
- 'RpcError',
- 'RpcContext',
- 'Call',
- 'ChannelCredentials',
- 'CallCredentials',
- 'AuthMetadataContext',
- 'AuthMetadataPluginCallback',
- 'AuthMetadataPlugin',
- 'ServerCredentials',
- 'UnaryUnaryMultiCallable',
- 'UnaryStreamMultiCallable',
- 'StreamUnaryMultiCallable',
- 'StreamStreamMultiCallable',
- 'Channel',
- 'ServicerContext',
- 'RpcMethodHandler',
- 'HandlerCallDetails',
- 'GenericRpcHandler',
- 'ServiceRpcHandler',
- 'Server',
- 'unary_unary_rpc_method_handler',
- 'unary_stream_rpc_method_handler',
- 'stream_unary_rpc_method_handler',
- 'stream_stream_rpc_method_handler',
- 'method_handlers_generic_handler',
- 'ssl_channel_credentials',
- 'metadata_call_credentials',
- 'access_token_call_credentials',
- 'composite_call_credentials',
- 'composite_channel_credentials',
- 'ssl_server_credentials',
- 'channel_ready_future',
- 'insecure_channel',
- 'secure_channel',
- 'server',
- )
-
- six.assertCountEqual(
- self, expected_grpc_code_elements,
- _from_grpc_import_star.GRPC_ELEMENTS)
+ def testAll(self):
+ expected_grpc_code_elements = (
+ 'FutureTimeoutError',
+ 'FutureCancelledError',
+ 'Future',
+ 'ChannelConnectivity',
+ 'StatusCode',
+ 'RpcError',
+ 'RpcContext',
+ 'Call',
+ 'ChannelCredentials',
+ 'CallCredentials',
+ 'AuthMetadataContext',
+ 'AuthMetadataPluginCallback',
+ 'AuthMetadataPlugin',
+ 'ServerCredentials',
+ 'UnaryUnaryMultiCallable',
+ 'UnaryStreamMultiCallable',
+ 'StreamUnaryMultiCallable',
+ 'StreamStreamMultiCallable',
+ 'Channel',
+ 'ServicerContext',
+ 'RpcMethodHandler',
+ 'HandlerCallDetails',
+ 'GenericRpcHandler',
+ 'ServiceRpcHandler',
+ 'Server',
+ 'unary_unary_rpc_method_handler',
+ 'unary_stream_rpc_method_handler',
+ 'stream_unary_rpc_method_handler',
+ 'stream_stream_rpc_method_handler',
+ 'method_handlers_generic_handler',
+ 'ssl_channel_credentials',
+ 'metadata_call_credentials',
+ 'access_token_call_credentials',
+ 'composite_call_credentials',
+ 'composite_channel_credentials',
+ 'ssl_server_credentials',
+ 'channel_ready_future',
+ 'insecure_channel',
+ 'secure_channel',
+ 'server',)
+
+ six.assertCountEqual(self, expected_grpc_code_elements,
+ _from_grpc_import_star.GRPC_ELEMENTS)
class ChannelConnectivityTest(unittest.TestCase):
- def testChannelConnectivity(self):
- self.assertSequenceEqual(
- (grpc.ChannelConnectivity.IDLE,
- grpc.ChannelConnectivity.CONNECTING,
- grpc.ChannelConnectivity.READY,
- grpc.ChannelConnectivity.TRANSIENT_FAILURE,
- grpc.ChannelConnectivity.SHUTDOWN,),
- tuple(grpc.ChannelConnectivity))
+ def testChannelConnectivity(self):
+ self.assertSequenceEqual((
+ grpc.ChannelConnectivity.IDLE,
+ grpc.ChannelConnectivity.CONNECTING,
+ grpc.ChannelConnectivity.READY,
+ grpc.ChannelConnectivity.TRANSIENT_FAILURE,
+ grpc.ChannelConnectivity.SHUTDOWN,),
+ tuple(grpc.ChannelConnectivity))
class ChannelTest(unittest.TestCase):
- def test_secure_channel(self):
- channel_credentials = grpc.ssl_channel_credentials()
- channel = grpc.secure_channel('google.com:443', channel_credentials)
+ def test_secure_channel(self):
+ channel_credentials = grpc.ssl_channel_credentials()
+ channel = grpc.secure_channel('google.com:443', channel_credentials)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_auth_test.py b/src/python/grpcio_tests/tests/unit/_auth_test.py
index c31f7b06f7..52bd1cb7ba 100644
--- a/src/python/grpcio_tests/tests/unit/_auth_test.py
+++ b/src/python/grpcio_tests/tests/unit/_auth_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests of standard AuthMetadataPlugins."""
import collections
@@ -38,59 +37,59 @@ from grpc import _auth
class MockGoogleCreds(object):
- def get_access_token(self):
- token = collections.namedtuple('MockAccessTokenInfo',
- ('access_token', 'expires_in'))
- token.access_token = 'token'
- return token
+ def get_access_token(self):
+ token = collections.namedtuple('MockAccessTokenInfo',
+ ('access_token', 'expires_in'))
+ token.access_token = 'token'
+ return token
class MockExceptionGoogleCreds(object):
- def get_access_token(self):
- raise Exception()
+ def get_access_token(self):
+ raise Exception()
class GoogleCallCredentialsTest(unittest.TestCase):
- def test_google_call_credentials_success(self):
- callback_event = threading.Event()
+ def test_google_call_credentials_success(self):
+ callback_event = threading.Event()
- def mock_callback(metadata, error):
- self.assertEqual(metadata, (('authorization', 'Bearer token'),))
- self.assertIsNone(error)
- callback_event.set()
+ def mock_callback(metadata, error):
+ self.assertEqual(metadata, (('authorization', 'Bearer token'),))
+ self.assertIsNone(error)
+ callback_event.set()
- call_creds = _auth.GoogleCallCredentials(MockGoogleCreds())
- call_creds(None, mock_callback)
- self.assertTrue(callback_event.wait(1.0))
+ call_creds = _auth.GoogleCallCredentials(MockGoogleCreds())
+ call_creds(None, mock_callback)
+ self.assertTrue(callback_event.wait(1.0))
- def test_google_call_credentials_error(self):
- callback_event = threading.Event()
+ def test_google_call_credentials_error(self):
+ callback_event = threading.Event()
- def mock_callback(metadata, error):
- self.assertIsNotNone(error)
- callback_event.set()
+ def mock_callback(metadata, error):
+ self.assertIsNotNone(error)
+ callback_event.set()
- call_creds = _auth.GoogleCallCredentials(MockExceptionGoogleCreds())
- call_creds(None, mock_callback)
- self.assertTrue(callback_event.wait(1.0))
+ call_creds = _auth.GoogleCallCredentials(MockExceptionGoogleCreds())
+ call_creds(None, mock_callback)
+ self.assertTrue(callback_event.wait(1.0))
class AccessTokenCallCredentialsTest(unittest.TestCase):
- def test_google_call_credentials_success(self):
- callback_event = threading.Event()
+ def test_google_call_credentials_success(self):
+ callback_event = threading.Event()
- def mock_callback(metadata, error):
- self.assertEqual(metadata, (('authorization', 'Bearer token'),))
- self.assertIsNone(error)
- callback_event.set()
+ def mock_callback(metadata, error):
+ self.assertEqual(metadata, (('authorization', 'Bearer token'),))
+ self.assertIsNone(error)
+ callback_event.set()
- call_creds = _auth.AccessTokenCallCredentials('token')
- call_creds(None, mock_callback)
- self.assertTrue(callback_event.wait(1.0))
+ call_creds = _auth.AccessTokenCallCredentials('token')
+ call_creds(None, mock_callback)
+ self.assertTrue(callback_event.wait(1.0))
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_channel_args_test.py b/src/python/grpcio_tests/tests/unit/_channel_args_test.py
index b46497afd6..845db777a4 100644
--- a/src/python/grpcio_tests/tests/unit/_channel_args_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_args_test.py
@@ -26,17 +26,17 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests of Channel Args on client/server side."""
import unittest
import grpc
+
class TestPointerWrapper(object):
- def __int__(self):
- return 123456
+ def __int__(self):
+ return 123456
TEST_CHANNEL_ARGS = (
@@ -44,17 +44,17 @@ TEST_CHANNEL_ARGS = (
('arg2', 'str_val'),
('arg3', 1),
(b'arg4', 'str_val'),
- ('arg6', TestPointerWrapper()),
-)
+ ('arg6', TestPointerWrapper()),)
class ChannelArgsTest(unittest.TestCase):
- def test_client(self):
- grpc.insecure_channel('localhost:8080', options=TEST_CHANNEL_ARGS)
+ def test_client(self):
+ grpc.insecure_channel('localhost:8080', options=TEST_CHANNEL_ARGS)
+
+ def test_server(self):
+ grpc.server(None, options=TEST_CHANNEL_ARGS)
- def test_server(self):
- grpc.server(None, options=TEST_CHANNEL_ARGS)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
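
As a usage sketch of the options sequence this test exercises, channel arguments are passed as (key, value) pairs; the keys below are standard gRPC channel arguments chosen to illustrate the pattern and are not taken from the test:

    import grpc

    options = (
        ('grpc.max_receive_message_length', 16 * 1024 * 1024),
        ('grpc.primary_user_agent', 'example-agent'),
    )
    channel = grpc.insecure_channel('localhost:8080', options=options)
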
diff --git a/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
index 3d9dd17ff6..d67693154b 100644
--- a/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests of grpc._channel.Channel connectivity."""
import threading
@@ -39,125 +38,123 @@ from tests.unit import _thread_pool
def _ready_in_connectivities(connectivities):
- return grpc.ChannelConnectivity.READY in connectivities
+ return grpc.ChannelConnectivity.READY in connectivities
def _last_connectivity_is_not_ready(connectivities):
- return connectivities[-1] is not grpc.ChannelConnectivity.READY
+ return connectivities[-1] is not grpc.ChannelConnectivity.READY
class _Callback(object):
- def __init__(self):
- self._condition = threading.Condition()
- self._connectivities = []
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._connectivities = []
- def update(self, connectivity):
- with self._condition:
- self._connectivities.append(connectivity)
- self._condition.notify()
+ def update(self, connectivity):
+ with self._condition:
+ self._connectivities.append(connectivity)
+ self._condition.notify()
- def connectivities(self):
- with self._condition:
- return tuple(self._connectivities)
+ def connectivities(self):
+ with self._condition:
+ return tuple(self._connectivities)
- def block_until_connectivities_satisfy(self, predicate):
- with self._condition:
- while True:
- connectivities = tuple(self._connectivities)
- if predicate(connectivities):
- return connectivities
- else:
- self._condition.wait()
+ def block_until_connectivities_satisfy(self, predicate):
+ with self._condition:
+ while True:
+ connectivities = tuple(self._connectivities)
+ if predicate(connectivities):
+ return connectivities
+ else:
+ self._condition.wait()
class ChannelConnectivityTest(unittest.TestCase):
- def test_lonely_channel_connectivity(self):
- callback = _Callback()
-
- channel = grpc.insecure_channel('localhost:12345')
- channel.subscribe(callback.update, try_to_connect=False)
- first_connectivities = callback.block_until_connectivities_satisfy(bool)
- channel.subscribe(callback.update, try_to_connect=True)
- second_connectivities = callback.block_until_connectivities_satisfy(
- lambda connectivities: 2 <= len(connectivities))
- # Wait for a connection that will never happen.
- time.sleep(test_constants.SHORT_TIMEOUT)
- third_connectivities = callback.connectivities()
- channel.unsubscribe(callback.update)
- fourth_connectivities = callback.connectivities()
- channel.unsubscribe(callback.update)
- fifth_connectivities = callback.connectivities()
-
- self.assertSequenceEqual(
- (grpc.ChannelConnectivity.IDLE,), first_connectivities)
- self.assertNotIn(
- grpc.ChannelConnectivity.READY, second_connectivities)
- self.assertNotIn(
- grpc.ChannelConnectivity.READY, third_connectivities)
- self.assertNotIn(
- grpc.ChannelConnectivity.READY, fourth_connectivities)
- self.assertNotIn(
- grpc.ChannelConnectivity.READY, fifth_connectivities)
-
- def test_immediately_connectable_channel_connectivity(self):
- thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
- server = grpc.server(thread_pool)
- port = server.add_insecure_port('[::]:0')
- server.start()
- first_callback = _Callback()
- second_callback = _Callback()
-
- channel = grpc.insecure_channel('localhost:{}'.format(port))
- channel.subscribe(first_callback.update, try_to_connect=False)
- first_connectivities = first_callback.block_until_connectivities_satisfy(
- bool)
- # Wait for a connection that will never happen because try_to_connect=True
- # has not yet been passed.
- time.sleep(test_constants.SHORT_TIMEOUT)
- second_connectivities = first_callback.connectivities()
- channel.subscribe(second_callback.update, try_to_connect=True)
- third_connectivities = first_callback.block_until_connectivities_satisfy(
- lambda connectivities: 2 <= len(connectivities))
- fourth_connectivities = second_callback.block_until_connectivities_satisfy(
- bool)
- # Wait for a connection that will happen (or may already have happened).
- first_callback.block_until_connectivities_satisfy(_ready_in_connectivities)
- second_callback.block_until_connectivities_satisfy(_ready_in_connectivities)
- del channel
-
- self.assertSequenceEqual(
- (grpc.ChannelConnectivity.IDLE,), first_connectivities)
- self.assertSequenceEqual(
- (grpc.ChannelConnectivity.IDLE,), second_connectivities)
- self.assertNotIn(
- grpc.ChannelConnectivity.TRANSIENT_FAILURE, third_connectivities)
- self.assertNotIn(
- grpc.ChannelConnectivity.SHUTDOWN, third_connectivities)
- self.assertNotIn(
- grpc.ChannelConnectivity.TRANSIENT_FAILURE,
- fourth_connectivities)
- self.assertNotIn(
- grpc.ChannelConnectivity.SHUTDOWN, fourth_connectivities)
- self.assertFalse(thread_pool.was_used())
-
- def test_reachable_then_unreachable_channel_connectivity(self):
- thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
- server = grpc.server(thread_pool)
- port = server.add_insecure_port('[::]:0')
- server.start()
- callback = _Callback()
-
- channel = grpc.insecure_channel('localhost:{}'.format(port))
- channel.subscribe(callback.update, try_to_connect=True)
- callback.block_until_connectivities_satisfy(_ready_in_connectivities)
- # Now take down the server and confirm that channel readiness is repudiated.
- server.stop(None)
- callback.block_until_connectivities_satisfy(_last_connectivity_is_not_ready)
- channel.unsubscribe(callback.update)
- self.assertFalse(thread_pool.was_used())
+ def test_lonely_channel_connectivity(self):
+ callback = _Callback()
+
+ channel = grpc.insecure_channel('localhost:12345')
+ channel.subscribe(callback.update, try_to_connect=False)
+ first_connectivities = callback.block_until_connectivities_satisfy(bool)
+ channel.subscribe(callback.update, try_to_connect=True)
+ second_connectivities = callback.block_until_connectivities_satisfy(
+ lambda connectivities: 2 <= len(connectivities))
+ # Wait for a connection that will never happen.
+ time.sleep(test_constants.SHORT_TIMEOUT)
+ third_connectivities = callback.connectivities()
+ channel.unsubscribe(callback.update)
+ fourth_connectivities = callback.connectivities()
+ channel.unsubscribe(callback.update)
+ fifth_connectivities = callback.connectivities()
+
+ self.assertSequenceEqual((grpc.ChannelConnectivity.IDLE,),
+ first_connectivities)
+ self.assertNotIn(grpc.ChannelConnectivity.READY, second_connectivities)
+ self.assertNotIn(grpc.ChannelConnectivity.READY, third_connectivities)
+ self.assertNotIn(grpc.ChannelConnectivity.READY, fourth_connectivities)
+ self.assertNotIn(grpc.ChannelConnectivity.READY, fifth_connectivities)
+
+ def test_immediately_connectable_channel_connectivity(self):
+ thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
+ server = grpc.server(thread_pool)
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ first_callback = _Callback()
+ second_callback = _Callback()
+
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
+ channel.subscribe(first_callback.update, try_to_connect=False)
+ first_connectivities = first_callback.block_until_connectivities_satisfy(
+ bool)
+ # Wait for a connection that will never happen because try_to_connect=True
+ # has not yet been passed.
+ time.sleep(test_constants.SHORT_TIMEOUT)
+ second_connectivities = first_callback.connectivities()
+ channel.subscribe(second_callback.update, try_to_connect=True)
+ third_connectivities = first_callback.block_until_connectivities_satisfy(
+ lambda connectivities: 2 <= len(connectivities))
+ fourth_connectivities = second_callback.block_until_connectivities_satisfy(
+ bool)
+ # Wait for a connection that will happen (or may already have happened).
+ first_callback.block_until_connectivities_satisfy(
+ _ready_in_connectivities)
+ second_callback.block_until_connectivities_satisfy(
+ _ready_in_connectivities)
+ del channel
+
+ self.assertSequenceEqual((grpc.ChannelConnectivity.IDLE,),
+ first_connectivities)
+ self.assertSequenceEqual((grpc.ChannelConnectivity.IDLE,),
+ second_connectivities)
+ self.assertNotIn(grpc.ChannelConnectivity.TRANSIENT_FAILURE,
+ third_connectivities)
+ self.assertNotIn(grpc.ChannelConnectivity.SHUTDOWN,
+ third_connectivities)
+ self.assertNotIn(grpc.ChannelConnectivity.TRANSIENT_FAILURE,
+ fourth_connectivities)
+ self.assertNotIn(grpc.ChannelConnectivity.SHUTDOWN,
+ fourth_connectivities)
+ self.assertFalse(thread_pool.was_used())
+
+ def test_reachable_then_unreachable_channel_connectivity(self):
+ thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
+ server = grpc.server(thread_pool)
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ callback = _Callback()
+
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
+ channel.subscribe(callback.update, try_to_connect=True)
+ callback.block_until_connectivities_satisfy(_ready_in_connectivities)
+ # Now take down the server and confirm that channel readiness is repudiated.
+ server.stop(None)
+ callback.block_until_connectivities_satisfy(
+ _last_connectivity_is_not_ready)
+ channel.unsubscribe(callback.update)
+ self.assertFalse(thread_pool.was_used())
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
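
A minimal sketch of the subscription API this test exercises: register a callback for connectivity changes, ask the channel to try to connect, and unsubscribe when done (the target address is illustrative):

    import grpc

    def on_connectivity(connectivity):
        print(connectivity)  # e.g. IDLE, CONNECTING, READY

    channel = grpc.insecure_channel('localhost:12345')
    channel.subscribe(on_connectivity, try_to_connect=True)
    # ... issue RPCs ...
    channel.unsubscribe(on_connectivity)
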
diff --git a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
index 46a964db8c..2d1b63e15f 100644
--- a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests of grpc.channel_ready_future."""
import threading
@@ -39,65 +38,66 @@ from tests.unit import _thread_pool
class _Callback(object):
- def __init__(self):
- self._condition = threading.Condition()
- self._value = None
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._value = None
- def accept_value(self, value):
- with self._condition:
- self._value = value
- self._condition.notify_all()
+ def accept_value(self, value):
+ with self._condition:
+ self._value = value
+ self._condition.notify_all()
- def block_until_called(self):
- with self._condition:
- while self._value is None:
- self._condition.wait()
- return self._value
+ def block_until_called(self):
+ with self._condition:
+ while self._value is None:
+ self._condition.wait()
+ return self._value
class ChannelReadyFutureTest(unittest.TestCase):
- def test_lonely_channel_connectivity(self):
- channel = grpc.insecure_channel('localhost:12345')
- callback = _Callback()
-
- ready_future = grpc.channel_ready_future(channel)
- ready_future.add_done_callback(callback.accept_value)
- with self.assertRaises(grpc.FutureTimeoutError):
- ready_future.result(timeout=test_constants.SHORT_TIMEOUT)
- self.assertFalse(ready_future.cancelled())
- self.assertFalse(ready_future.done())
- self.assertTrue(ready_future.running())
- ready_future.cancel()
- value_passed_to_callback = callback.block_until_called()
- self.assertIs(ready_future, value_passed_to_callback)
- self.assertTrue(ready_future.cancelled())
- self.assertTrue(ready_future.done())
- self.assertFalse(ready_future.running())
-
- def test_immediately_connectable_channel_connectivity(self):
- thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
- server = grpc.server(thread_pool)
- port = server.add_insecure_port('[::]:0')
- server.start()
- channel = grpc.insecure_channel('localhost:{}'.format(port))
- callback = _Callback()
-
- ready_future = grpc.channel_ready_future(channel)
- ready_future.add_done_callback(callback.accept_value)
- self.assertIsNone(ready_future.result(timeout=test_constants.LONG_TIMEOUT))
- value_passed_to_callback = callback.block_until_called()
- self.assertIs(ready_future, value_passed_to_callback)
- self.assertFalse(ready_future.cancelled())
- self.assertTrue(ready_future.done())
- self.assertFalse(ready_future.running())
- # Cancellation after maturity has no effect.
- ready_future.cancel()
- self.assertFalse(ready_future.cancelled())
- self.assertTrue(ready_future.done())
- self.assertFalse(ready_future.running())
- self.assertFalse(thread_pool.was_used())
+ def test_lonely_channel_connectivity(self):
+ channel = grpc.insecure_channel('localhost:12345')
+ callback = _Callback()
+
+ ready_future = grpc.channel_ready_future(channel)
+ ready_future.add_done_callback(callback.accept_value)
+ with self.assertRaises(grpc.FutureTimeoutError):
+ ready_future.result(timeout=test_constants.SHORT_TIMEOUT)
+ self.assertFalse(ready_future.cancelled())
+ self.assertFalse(ready_future.done())
+ self.assertTrue(ready_future.running())
+ ready_future.cancel()
+ value_passed_to_callback = callback.block_until_called()
+ self.assertIs(ready_future, value_passed_to_callback)
+ self.assertTrue(ready_future.cancelled())
+ self.assertTrue(ready_future.done())
+ self.assertFalse(ready_future.running())
+
+ def test_immediately_connectable_channel_connectivity(self):
+ thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
+ server = grpc.server(thread_pool)
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
+ callback = _Callback()
+
+ ready_future = grpc.channel_ready_future(channel)
+ ready_future.add_done_callback(callback.accept_value)
+ self.assertIsNone(
+ ready_future.result(timeout=test_constants.LONG_TIMEOUT))
+ value_passed_to_callback = callback.block_until_called()
+ self.assertIs(ready_future, value_passed_to_callback)
+ self.assertFalse(ready_future.cancelled())
+ self.assertTrue(ready_future.done())
+ self.assertFalse(ready_future.running())
+ # Cancellation after maturity has no effect.
+ ready_future.cancel()
+ self.assertFalse(ready_future.cancelled())
+ self.assertTrue(ready_future.done())
+ self.assertFalse(ready_future.running())
+ self.assertFalse(thread_pool.was_used())
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
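
A minimal sketch of grpc.channel_ready_future as exercised above: block until the channel reaches READY, or give up after a timeout (target and timeout are illustrative):

    import grpc

    channel = grpc.insecure_channel('localhost:50051')
    try:
        grpc.channel_ready_future(channel).result(timeout=5)
        print('channel is ready')
    except grpc.FutureTimeoutError:
        print('channel was not ready within 5 seconds')
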
diff --git a/src/python/grpcio_tests/tests/unit/_compression_test.py b/src/python/grpcio_tests/tests/unit/_compression_test.py
index 4d3f02e917..7dd944e600 100644
--- a/src/python/grpcio_tests/tests/unit/_compression_test.py
+++ b/src/python/grpcio_tests/tests/unit/_compression_test.py
@@ -42,93 +42,96 @@ _STREAM_STREAM = '/test/StreamStream'
def handle_unary(request, servicer_context):
- servicer_context.send_initial_metadata([
- ('grpc-internal-encoding-request', 'gzip')])
- return request
+ servicer_context.send_initial_metadata(
+ [('grpc-internal-encoding-request', 'gzip')])
+ return request
def handle_stream(request_iterator, servicer_context):
- # TODO(issue:#6891) We should be able to remove this loop,
- # and replace with return; yield
- servicer_context.send_initial_metadata([
- ('grpc-internal-encoding-request', 'gzip')])
- for request in request_iterator:
- yield request
+ # TODO(issue:#6891) We should be able to remove this loop,
+ # and replace with return; yield
+ servicer_context.send_initial_metadata(
+ [('grpc-internal-encoding-request', 'gzip')])
+ for request in request_iterator:
+ yield request
class _MethodHandler(grpc.RpcMethodHandler):
- def __init__(self, request_streaming, response_streaming):
- self.request_streaming = request_streaming
- self.response_streaming = response_streaming
- self.request_deserializer = None
- self.response_serializer = None
- self.unary_unary = None
- self.unary_stream = None
- self.stream_unary = None
- self.stream_stream = None
- if self.request_streaming and self.response_streaming:
- self.stream_stream = lambda x, y: handle_stream(x, y)
- elif not self.request_streaming and not self.response_streaming:
- self.unary_unary = lambda x, y: handle_unary(x, y)
+ def __init__(self, request_streaming, response_streaming):
+ self.request_streaming = request_streaming
+ self.response_streaming = response_streaming
+ self.request_deserializer = None
+ self.response_serializer = None
+ self.unary_unary = None
+ self.unary_stream = None
+ self.stream_unary = None
+ self.stream_stream = None
+ if self.request_streaming and self.response_streaming:
+ self.stream_stream = lambda x, y: handle_stream(x, y)
+ elif not self.request_streaming and not self.response_streaming:
+ self.unary_unary = lambda x, y: handle_unary(x, y)
class _GenericHandler(grpc.GenericRpcHandler):
- def service(self, handler_call_details):
- if handler_call_details.method == _UNARY_UNARY:
- return _MethodHandler(False, False)
- elif handler_call_details.method == _STREAM_STREAM:
- return _MethodHandler(True, True)
- else:
- return None
+ def service(self, handler_call_details):
+ if handler_call_details.method == _UNARY_UNARY:
+ return _MethodHandler(False, False)
+ elif handler_call_details.method == _STREAM_STREAM:
+ return _MethodHandler(True, True)
+ else:
+ return None
class CompressionTest(unittest.TestCase):
- def setUp(self):
- self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- self._server = grpc.server(
- self._server_pool, handlers=(_GenericHandler(),))
- self._port = self._server.add_insecure_port('[::]:0')
- self._server.start()
-
- def testUnary(self):
- request = b'\x00' * 100
-
- # Client -> server compressed through default client channel compression
- # settings. Server -> client compressed via server-side metadata setting.
- # TODO(https://github.com/grpc/grpc/issues/4078): replace the "1" integer
- # literal with proper use of the public API.
- compressed_channel = grpc.insecure_channel('localhost:%d' % self._port,
- options=[('grpc.default_compression_algorithm', 1)])
- multi_callable = compressed_channel.unary_unary(_UNARY_UNARY)
- response = multi_callable(request)
- self.assertEqual(request, response)
-
- # Client -> server compressed through client metadata setting. Server ->
- # client compressed via server-side metadata setting.
- # TODO(https://github.com/grpc/grpc/issues/4078): replace the "0" integer
- # literal with proper use of the public API.
- uncompressed_channel = grpc.insecure_channel('localhost:%d' % self._port,
- options=[('grpc.default_compression_algorithm', 0)])
- multi_callable = compressed_channel.unary_unary(_UNARY_UNARY)
- response = multi_callable(request, metadata=[
- ('grpc-internal-encoding-request', 'gzip')])
- self.assertEqual(request, response)
-
- def testStreaming(self):
- request = b'\x00' * 100
-
- # TODO(https://github.com/grpc/grpc/issues/4078): replace the "1" integer
- # literal with proper use of the public API.
- compressed_channel = grpc.insecure_channel('localhost:%d' % self._port,
- options=[('grpc.default_compression_algorithm', 1)])
- multi_callable = compressed_channel.stream_stream(_STREAM_STREAM)
- call = multi_callable(iter([request] * test_constants.STREAM_LENGTH))
- for response in call:
- self.assertEqual(request, response)
+ def setUp(self):
+ self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ self._server = grpc.server(
+ self._server_pool, handlers=(_GenericHandler(),))
+ self._port = self._server.add_insecure_port('[::]:0')
+ self._server.start()
+
+ def testUnary(self):
+ request = b'\x00' * 100
+
+ # Client -> server compressed through default client channel compression
+ # settings. Server -> client compressed via server-side metadata setting.
+ # TODO(https://github.com/grpc/grpc/issues/4078): replace the "1" integer
+ # literal with proper use of the public API.
+ compressed_channel = grpc.insecure_channel(
+ 'localhost:%d' % self._port,
+ options=[('grpc.default_compression_algorithm', 1)])
+ multi_callable = compressed_channel.unary_unary(_UNARY_UNARY)
+ response = multi_callable(request)
+ self.assertEqual(request, response)
+
+ # Client -> server compressed through client metadata setting. Server ->
+ # client compressed via server-side metadata setting.
+ # TODO(https://github.com/grpc/grpc/issues/4078): replace the "0" integer
+ # literal with proper use of the public API.
+ uncompressed_channel = grpc.insecure_channel(
+ 'localhost:%d' % self._port,
+ options=[('grpc.default_compression_algorithm', 0)])
+        multi_callable = uncompressed_channel.unary_unary(_UNARY_UNARY)
+ response = multi_callable(
+ request, metadata=[('grpc-internal-encoding-request', 'gzip')])
+ self.assertEqual(request, response)
+
+ def testStreaming(self):
+ request = b'\x00' * 100
+
+ # TODO(https://github.com/grpc/grpc/issues/4078): replace the "1" integer
+ # literal with proper use of the public API.
+ compressed_channel = grpc.insecure_channel(
+ 'localhost:%d' % self._port,
+ options=[('grpc.default_compression_algorithm', 1)])
+ multi_callable = compressed_channel.stream_stream(_STREAM_STREAM)
+ call = multi_callable(iter([request] * test_constants.STREAM_LENGTH))
+ for response in call:
+ self.assertEqual(request, response)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
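
The compression test above drives gzip two ways: a channel-wide default via the grpc.default_compression_algorithm channel option (the bare integer literal is the stand-in flagged in the TODO), and a per-call request via the grpc-internal-encoding-request metadata key. A client-side sketch of both, assuming a server such as the one in the test is already listening; the port and method path are placeholders:

    import grpc

    _PORT = 50051  # placeholder; assumes a compatible server is listening here

    # Channel-wide default: every outgoing message on this channel is gzipped.
    compressed_channel = grpc.insecure_channel(
        'localhost:%d' % _PORT,
        options=[('grpc.default_compression_algorithm', 1)])

    # Per-call alternative: an ordinary channel, with gzip requested for a
    # single RPC through metadata, exactly as the test does.
    plain_channel = grpc.insecure_channel('localhost:%d' % _PORT)
    multi_callable = plain_channel.unary_unary('/test/UnaryUnary')  # placeholder path
    response = multi_callable(
        b'\x00' * 100,
        metadata=[('grpc-internal-encoding-request', 'gzip')])
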
diff --git a/src/python/grpcio_tests/tests/unit/_credentials_test.py b/src/python/grpcio_tests/tests/unit/_credentials_test.py
index 87af85a0b9..21bf29789a 100644
--- a/src/python/grpcio_tests/tests/unit/_credentials_test.py
+++ b/src/python/grpcio_tests/tests/unit/_credentials_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests of credentials."""
import unittest
@@ -36,37 +35,38 @@ import grpc
class CredentialsTest(unittest.TestCase):
- def test_call_credentials_composition(self):
- first = grpc.access_token_call_credentials('abc')
- second = grpc.access_token_call_credentials('def')
- third = grpc.access_token_call_credentials('ghi')
+ def test_call_credentials_composition(self):
+ first = grpc.access_token_call_credentials('abc')
+ second = grpc.access_token_call_credentials('def')
+ third = grpc.access_token_call_credentials('ghi')
+
+ first_and_second = grpc.composite_call_credentials(first, second)
+ first_second_and_third = grpc.composite_call_credentials(first, second,
+ third)
- first_and_second = grpc.composite_call_credentials(first, second)
- first_second_and_third = grpc.composite_call_credentials(
- first, second, third)
-
- self.assertIsInstance(first_and_second, grpc.CallCredentials)
- self.assertIsInstance(first_second_and_third, grpc.CallCredentials)
+ self.assertIsInstance(first_and_second, grpc.CallCredentials)
+ self.assertIsInstance(first_second_and_third, grpc.CallCredentials)
- def test_channel_credentials_composition(self):
- first_call_credentials = grpc.access_token_call_credentials('abc')
- second_call_credentials = grpc.access_token_call_credentials('def')
- third_call_credentials = grpc.access_token_call_credentials('ghi')
- channel_credentials = grpc.ssl_channel_credentials()
+ def test_channel_credentials_composition(self):
+ first_call_credentials = grpc.access_token_call_credentials('abc')
+ second_call_credentials = grpc.access_token_call_credentials('def')
+ third_call_credentials = grpc.access_token_call_credentials('ghi')
+ channel_credentials = grpc.ssl_channel_credentials()
- channel_and_first = grpc.composite_channel_credentials(
- channel_credentials, first_call_credentials)
- channel_first_and_second = grpc.composite_channel_credentials(
- channel_credentials, first_call_credentials, second_call_credentials)
- channel_first_second_and_third = grpc.composite_channel_credentials(
- channel_credentials, first_call_credentials, second_call_credentials,
- third_call_credentials)
+ channel_and_first = grpc.composite_channel_credentials(
+ channel_credentials, first_call_credentials)
+ channel_first_and_second = grpc.composite_channel_credentials(
+ channel_credentials, first_call_credentials,
+ second_call_credentials)
+ channel_first_second_and_third = grpc.composite_channel_credentials(
+ channel_credentials, first_call_credentials,
+ second_call_credentials, third_call_credentials)
- self.assertIsInstance(channel_and_first, grpc.ChannelCredentials)
- self.assertIsInstance(channel_first_and_second, grpc.ChannelCredentials)
- self.assertIsInstance(
- channel_first_second_and_third, grpc.ChannelCredentials)
+ self.assertIsInstance(channel_and_first, grpc.ChannelCredentials)
+ self.assertIsInstance(channel_first_and_second, grpc.ChannelCredentials)
+ self.assertIsInstance(channel_first_second_and_third,
+ grpc.ChannelCredentials)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
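
The credentials test above only asserts that the composite objects have the expected types. A sketch of how the same composition is typically attached to a channel; the target and tokens are placeholders, and access-token call credentials require a TLS-backed channel such as the one built here:

    import grpc

    # Per-call credentials carrying bearer tokens (placeholder values).
    call_creds = grpc.composite_call_credentials(
        grpc.access_token_call_credentials('abc'),
        grpc.access_token_call_credentials('def'))

    # Channel-level TLS credentials combined with the call credentials, so
    # every RPC made on the channel presents both.
    channel_creds = grpc.composite_channel_credentials(
        grpc.ssl_channel_credentials(), call_creds)

    channel = grpc.secure_channel('example.com:443', channel_creds)
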
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py b/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py
index 20115fb22c..d77f5ecb27 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Test making many calls and immediately cancelling most of them."""
import threading
@@ -51,173 +50,178 @@ _SUCCESS_CALL_FRACTION = 1.0 / 8.0
class _State(object):
- def __init__(self):
- self.condition = threading.Condition()
- self.handlers_released = False
- self.parked_handlers = 0
- self.handled_rpcs = 0
+ def __init__(self):
+ self.condition = threading.Condition()
+ self.handlers_released = False
+ self.parked_handlers = 0
+ self.handled_rpcs = 0
def _is_cancellation_event(event):
- return (
- event.tag is _RECEIVE_CLOSE_ON_SERVER_TAG and
- event.batch_operations[0].received_cancelled)
+ return (event.tag is _RECEIVE_CLOSE_ON_SERVER_TAG and
+ event.batch_operations[0].received_cancelled)
class _Handler(object):
- def __init__(self, state, completion_queue, rpc_event):
- self._state = state
- self._lock = threading.Lock()
- self._completion_queue = completion_queue
- self._call = rpc_event.operation_call
-
- def __call__(self):
- with self._state.condition:
- self._state.parked_handlers += 1
- if self._state.parked_handlers == test_constants.THREAD_CONCURRENCY:
- self._state.condition.notify_all()
- while not self._state.handlers_released:
- self._state.condition.wait()
-
- with self._lock:
- self._call.start_server_batch(
- cygrpc.Operations(
- (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
- _RECEIVE_CLOSE_ON_SERVER_TAG)
- self._call.start_server_batch(
- cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
- _RECEIVE_MESSAGE_TAG)
- first_event = self._completion_queue.poll()
- if _is_cancellation_event(first_event):
- self._completion_queue.poll()
- else:
- with self._lock:
- operations = (
- cygrpc.operation_send_initial_metadata(
- _EMPTY_METADATA, _EMPTY_FLAGS),
- cygrpc.operation_send_message(b'\x79\x57', _EMPTY_FLAGS),
- cygrpc.operation_send_status_from_server(
- _EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!',
- _EMPTY_FLAGS),
- )
- self._call.start_server_batch(
- cygrpc.Operations(operations), _SERVER_COMPLETE_CALL_TAG)
- self._completion_queue.poll()
- self._completion_queue.poll()
+ def __init__(self, state, completion_queue, rpc_event):
+ self._state = state
+ self._lock = threading.Lock()
+ self._completion_queue = completion_queue
+ self._call = rpc_event.operation_call
+
+ def __call__(self):
+ with self._state.condition:
+ self._state.parked_handlers += 1
+ if self._state.parked_handlers == test_constants.THREAD_CONCURRENCY:
+ self._state.condition.notify_all()
+ while not self._state.handlers_released:
+ self._state.condition.wait()
+
+ with self._lock:
+ self._call.start_server_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
+ _RECEIVE_CLOSE_ON_SERVER_TAG)
+ self._call.start_server_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
+ _RECEIVE_MESSAGE_TAG)
+ first_event = self._completion_queue.poll()
+ if _is_cancellation_event(first_event):
+ self._completion_queue.poll()
+ else:
+ with self._lock:
+ operations = (
+ cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
+ _EMPTY_FLAGS),
+ cygrpc.operation_send_message(b'\x79\x57', _EMPTY_FLAGS),
+ cygrpc.operation_send_status_from_server(
+ _EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!',
+ _EMPTY_FLAGS),)
+ self._call.start_server_batch(
+ cygrpc.Operations(operations), _SERVER_COMPLETE_CALL_TAG)
+ self._completion_queue.poll()
+ self._completion_queue.poll()
def _serve(state, server, server_completion_queue, thread_pool):
- for _ in range(test_constants.RPC_CONCURRENCY):
- call_completion_queue = cygrpc.CompletionQueue()
- server.request_call(
- call_completion_queue, server_completion_queue, _REQUEST_CALL_TAG)
- rpc_event = server_completion_queue.poll()
- thread_pool.submit(_Handler(state, call_completion_queue, rpc_event))
- with state.condition:
- state.handled_rpcs += 1
- if test_constants.RPC_CONCURRENCY <= state.handled_rpcs:
- state.condition.notify_all()
- server_completion_queue.poll()
+ for _ in range(test_constants.RPC_CONCURRENCY):
+ call_completion_queue = cygrpc.CompletionQueue()
+ server.request_call(call_completion_queue, server_completion_queue,
+ _REQUEST_CALL_TAG)
+ rpc_event = server_completion_queue.poll()
+ thread_pool.submit(_Handler(state, call_completion_queue, rpc_event))
+ with state.condition:
+ state.handled_rpcs += 1
+ if test_constants.RPC_CONCURRENCY <= state.handled_rpcs:
+ state.condition.notify_all()
+ server_completion_queue.poll()
class _QueueDriver(object):
- def __init__(self, condition, completion_queue, due):
- self._condition = condition
- self._completion_queue = completion_queue
- self._due = due
- self._events = []
- self._returned = False
-
- def start(self):
- def in_thread():
- while True:
- event = self._completion_queue.poll()
+ def __init__(self, condition, completion_queue, due):
+ self._condition = condition
+ self._completion_queue = completion_queue
+ self._due = due
+ self._events = []
+ self._returned = False
+
+ def start(self):
+
+ def in_thread():
+ while True:
+ event = self._completion_queue.poll()
+ with self._condition:
+ self._events.append(event)
+ self._due.remove(event.tag)
+ self._condition.notify_all()
+ if not self._due:
+ self._returned = True
+ return
+
+ thread = threading.Thread(target=in_thread)
+ thread.start()
+
+ def events(self, at_least):
with self._condition:
- self._events.append(event)
- self._due.remove(event.tag)
- self._condition.notify_all()
- if not self._due:
- self._returned = True
- return
- thread = threading.Thread(target=in_thread)
- thread.start()
-
- def events(self, at_least):
- with self._condition:
- while len(self._events) < at_least:
- self._condition.wait()
- return tuple(self._events)
+ while len(self._events) < at_least:
+ self._condition.wait()
+ return tuple(self._events)
class CancelManyCallsTest(unittest.TestCase):
- def testCancelManyCalls(self):
- server_thread_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-
- server_completion_queue = cygrpc.CompletionQueue()
- server = cygrpc.Server(cygrpc.ChannelArgs([]))
- server.register_completion_queue(server_completion_queue)
- port = server.add_http2_port(b'[::]:0')
- server.start()
- channel = cygrpc.Channel('localhost:{}'.format(port).encode(),
- cygrpc.ChannelArgs([]))
-
- state = _State()
-
- server_thread_args = (
- state, server, server_completion_queue, server_thread_pool,)
- server_thread = threading.Thread(target=_serve, args=server_thread_args)
- server_thread.start()
-
- client_condition = threading.Condition()
- client_due = set()
- client_completion_queue = cygrpc.CompletionQueue()
- client_driver = _QueueDriver(
- client_condition, client_completion_queue, client_due)
- client_driver.start()
-
- with client_condition:
- client_calls = []
- for index in range(test_constants.RPC_CONCURRENCY):
- client_call = channel.create_call(
- None, _EMPTY_FLAGS, client_completion_queue, b'/twinkies', None,
- _INFINITE_FUTURE)
- operations = (
- cygrpc.operation_send_initial_metadata(
- _EMPTY_METADATA, _EMPTY_FLAGS),
- cygrpc.operation_send_message(b'\x45\x56', _EMPTY_FLAGS),
- cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
- cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
- cygrpc.operation_receive_message(_EMPTY_FLAGS),
- cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
- )
- tag = 'client_complete_call_{0:04d}_tag'.format(index)
- client_call.start_client_batch(cygrpc.Operations(operations), tag)
- client_due.add(tag)
- client_calls.append(client_call)
-
- with state.condition:
- while True:
- if state.parked_handlers < test_constants.THREAD_CONCURRENCY:
- state.condition.wait()
- elif state.handled_rpcs < test_constants.RPC_CONCURRENCY:
- state.condition.wait()
- else:
- state.handlers_released = True
- state.condition.notify_all()
- break
-
- client_driver.events(
- test_constants.RPC_CONCURRENCY * _SUCCESS_CALL_FRACTION)
- with client_condition:
- for client_call in client_calls:
- client_call.cancel()
-
- with state.condition:
- server.shutdown(server_completion_queue, _SERVER_SHUTDOWN_TAG)
+ def testCancelManyCalls(self):
+ server_thread_pool = logging_pool.pool(
+ test_constants.THREAD_CONCURRENCY)
+
+ server_completion_queue = cygrpc.CompletionQueue()
+ server = cygrpc.Server(cygrpc.ChannelArgs([]))
+ server.register_completion_queue(server_completion_queue)
+ port = server.add_http2_port(b'[::]:0')
+ server.start()
+ channel = cygrpc.Channel('localhost:{}'.format(port).encode(),
+ cygrpc.ChannelArgs([]))
+
+ state = _State()
+
+ server_thread_args = (
+ state,
+ server,
+ server_completion_queue,
+ server_thread_pool,)
+ server_thread = threading.Thread(target=_serve, args=server_thread_args)
+ server_thread.start()
+
+ client_condition = threading.Condition()
+ client_due = set()
+ client_completion_queue = cygrpc.CompletionQueue()
+ client_driver = _QueueDriver(client_condition, client_completion_queue,
+ client_due)
+ client_driver.start()
+
+ with client_condition:
+ client_calls = []
+ for index in range(test_constants.RPC_CONCURRENCY):
+ client_call = channel.create_call(
+ None, _EMPTY_FLAGS, client_completion_queue, b'/twinkies',
+ None, _INFINITE_FUTURE)
+ operations = (
+ cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
+ _EMPTY_FLAGS),
+ cygrpc.operation_send_message(b'\x45\x56', _EMPTY_FLAGS),
+ cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
+ cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
+ tag = 'client_complete_call_{0:04d}_tag'.format(index)
+ client_call.start_client_batch(
+ cygrpc.Operations(operations), tag)
+ client_due.add(tag)
+ client_calls.append(client_call)
+
+ with state.condition:
+ while True:
+ if state.parked_handlers < test_constants.THREAD_CONCURRENCY:
+ state.condition.wait()
+ elif state.handled_rpcs < test_constants.RPC_CONCURRENCY:
+ state.condition.wait()
+ else:
+ state.handlers_released = True
+ state.condition.notify_all()
+ break
+
+ client_driver.events(test_constants.RPC_CONCURRENCY *
+ _SUCCESS_CALL_FRACTION)
+ with client_condition:
+ for client_call in client_calls:
+ client_call.cancel()
+
+ with state.condition:
+ server.shutdown(server_completion_queue, _SERVER_SHUTDOWN_TAG)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
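
The _Handler/_QueueDriver machinery above boils down to one pattern: a background thread drains a completion queue while a threading.Condition lets the test block until enough events have arrived. A stripped-down, queue-agnostic sketch of that pattern; the poll callable is a stand-in for a completion queue's poll method, and the None sentinel is an assumption of this sketch:

    import threading


    class EventDriver(object):
        """Collects results of a blocking poll() callable on a background thread."""

        def __init__(self, poll):
            self._poll = poll  # e.g. a completion queue's poll method
            self._condition = threading.Condition()
            self._events = []

        def start(self):

            def in_thread():
                while True:
                    event = self._poll()
                    with self._condition:
                        self._events.append(event)
                        self._condition.notify_all()
                        if event is None:  # sentinel meaning "queue drained"
                            return

            threading.Thread(target=in_thread).start()

        def events(self, at_least):
            # Mirrors _QueueDriver.events: block until enough events arrived.
            with self._condition:
                while len(self._events) < at_least:
                    self._condition.wait()
                return tuple(self._events)
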
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_channel_test.py b/src/python/grpcio_tests/tests/unit/_cython/_channel_test.py
index f9c8a3ac62..0ca06868b2 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_channel_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_channel_test.py
@@ -37,46 +37,49 @@ from tests.unit.framework.common import test_constants
def _channel_and_completion_queue():
- channel = cygrpc.Channel(b'localhost:54321', cygrpc.ChannelArgs(()))
- completion_queue = cygrpc.CompletionQueue()
- return channel, completion_queue
+ channel = cygrpc.Channel(b'localhost:54321', cygrpc.ChannelArgs(()))
+ completion_queue = cygrpc.CompletionQueue()
+ return channel, completion_queue
def _connectivity_loop(channel, completion_queue):
- for _ in range(100):
- connectivity = channel.check_connectivity_state(True)
- channel.watch_connectivity_state(
- connectivity, cygrpc.Timespec(time.time() + 0.2), completion_queue,
- None)
- completion_queue.poll(deadline=cygrpc.Timespec(float('+inf')))
+ for _ in range(100):
+ connectivity = channel.check_connectivity_state(True)
+ channel.watch_connectivity_state(connectivity,
+ cygrpc.Timespec(time.time() + 0.2),
+ completion_queue, None)
+ completion_queue.poll(deadline=cygrpc.Timespec(float('+inf')))
def _create_loop_destroy():
- channel, completion_queue = _channel_and_completion_queue()
- _connectivity_loop(channel, completion_queue)
- completion_queue.shutdown()
+ channel, completion_queue = _channel_and_completion_queue()
+ _connectivity_loop(channel, completion_queue)
+ completion_queue.shutdown()
def _in_parallel(behavior, arguments):
- threads = tuple(
- threading.Thread(target=behavior, args=arguments)
- for _ in range(test_constants.THREAD_CONCURRENCY))
- for thread in threads:
- thread.start()
- for thread in threads:
- thread.join()
+ threads = tuple(
+ threading.Thread(
+ target=behavior, args=arguments)
+ for _ in range(test_constants.THREAD_CONCURRENCY))
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
class ChannelTest(unittest.TestCase):
- def test_single_channel_lonely_connectivity(self):
- channel, completion_queue = _channel_and_completion_queue()
- _in_parallel(_connectivity_loop, (channel, completion_queue,))
- completion_queue.shutdown()
+ def test_single_channel_lonely_connectivity(self):
+ channel, completion_queue = _channel_and_completion_queue()
+ _in_parallel(_connectivity_loop, (
+ channel,
+ completion_queue,))
+ completion_queue.shutdown()
- def test_multiple_channels_lonely_connectivity(self):
- _in_parallel(_create_loop_destroy, ())
+ def test_multiple_channels_lonely_connectivity(self):
+ _in_parallel(_create_loop_destroy, ())
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
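
The channel test's _in_parallel helper is the heart of both test cases: run the same connectivity loop from many threads at once to surface races in channel and completion-queue teardown. A standalone sketch of that fan-out/join pattern; the default thread count stands in for test_constants.THREAD_CONCURRENCY:

    import threading


    def run_in_parallel(behavior, arguments, thread_count=8):
        # Start identical threads and join them all; completing without an
        # exception is the whole assertion, as in _in_parallel above.
        threads = tuple(
            threading.Thread(target=behavior, args=arguments)
            for _ in range(thread_count))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
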
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py
index 2ae5285232..9fbfcbb9c0 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Test a corner-case at the level of the Cython API."""
import threading
@@ -41,212 +40,221 @@ _EMPTY_METADATA = cygrpc.Metadata(())
class _ServerDriver(object):
- def __init__(self, completion_queue, shutdown_tag):
- self._condition = threading.Condition()
- self._completion_queue = completion_queue
- self._shutdown_tag = shutdown_tag
- self._events = []
- self._saw_shutdown_tag = False
-
- def start(self):
- def in_thread():
- while True:
- event = self._completion_queue.poll()
+ def __init__(self, completion_queue, shutdown_tag):
+ self._condition = threading.Condition()
+ self._completion_queue = completion_queue
+ self._shutdown_tag = shutdown_tag
+ self._events = []
+ self._saw_shutdown_tag = False
+
+ def start(self):
+
+ def in_thread():
+ while True:
+ event = self._completion_queue.poll()
+ with self._condition:
+ self._events.append(event)
+ self._condition.notify()
+ if event.tag is self._shutdown_tag:
+ self._saw_shutdown_tag = True
+ break
+
+ thread = threading.Thread(target=in_thread)
+ thread.start()
+
+ def done(self):
+ with self._condition:
+ return self._saw_shutdown_tag
+
+ def first_event(self):
+ with self._condition:
+ while not self._events:
+ self._condition.wait()
+ return self._events[0]
+
+ def events(self):
with self._condition:
- self._events.append(event)
- self._condition.notify()
- if event.tag is self._shutdown_tag:
- self._saw_shutdown_tag = True
- break
- thread = threading.Thread(target=in_thread)
- thread.start()
-
- def done(self):
- with self._condition:
- return self._saw_shutdown_tag
-
- def first_event(self):
- with self._condition:
- while not self._events:
- self._condition.wait()
- return self._events[0]
-
- def events(self):
- with self._condition:
- while not self._saw_shutdown_tag:
- self._condition.wait()
- return tuple(self._events)
+ while not self._saw_shutdown_tag:
+ self._condition.wait()
+ return tuple(self._events)
class _QueueDriver(object):
- def __init__(self, condition, completion_queue, due):
- self._condition = condition
- self._completion_queue = completion_queue
- self._due = due
- self._events = []
- self._returned = False
-
- def start(self):
- def in_thread():
- while True:
- event = self._completion_queue.poll()
+ def __init__(self, condition, completion_queue, due):
+ self._condition = condition
+ self._completion_queue = completion_queue
+ self._due = due
+ self._events = []
+ self._returned = False
+
+ def start(self):
+
+ def in_thread():
+ while True:
+ event = self._completion_queue.poll()
+ with self._condition:
+ self._events.append(event)
+ self._due.remove(event.tag)
+ self._condition.notify_all()
+ if not self._due:
+ self._returned = True
+ return
+
+ thread = threading.Thread(target=in_thread)
+ thread.start()
+
+ def done(self):
+ with self._condition:
+ return self._returned
+
+ def event_with_tag(self, tag):
+ with self._condition:
+ while True:
+ for event in self._events:
+ if event.tag is tag:
+ return event
+ self._condition.wait()
+
+ def events(self):
with self._condition:
- self._events.append(event)
- self._due.remove(event.tag)
- self._condition.notify_all()
- if not self._due:
- self._returned = True
- return
- thread = threading.Thread(target=in_thread)
- thread.start()
-
- def done(self):
- with self._condition:
- return self._returned
-
- def event_with_tag(self, tag):
- with self._condition:
- while True:
- for event in self._events:
- if event.tag is tag:
- return event
- self._condition.wait()
-
- def events(self):
- with self._condition:
- while not self._returned:
- self._condition.wait()
- return tuple(self._events)
+ while not self._returned:
+ self._condition.wait()
+ return tuple(self._events)
class ReadSomeButNotAllResponsesTest(unittest.TestCase):
- def testReadSomeButNotAllResponses(self):
- server_completion_queue = cygrpc.CompletionQueue()
- server = cygrpc.Server(cygrpc.ChannelArgs([]))
- server.register_completion_queue(server_completion_queue)
- port = server.add_http2_port(b'[::]:0')
- server.start()
- channel = cygrpc.Channel('localhost:{}'.format(port).encode(),
- cygrpc.ChannelArgs([]))
-
- server_shutdown_tag = 'server_shutdown_tag'
- server_driver = _ServerDriver(server_completion_queue, server_shutdown_tag)
- server_driver.start()
-
- client_condition = threading.Condition()
- client_due = set()
- client_completion_queue = cygrpc.CompletionQueue()
- client_driver = _QueueDriver(
- client_condition, client_completion_queue, client_due)
- client_driver.start()
-
- server_call_condition = threading.Condition()
- server_send_initial_metadata_tag = 'server_send_initial_metadata_tag'
- server_send_first_message_tag = 'server_send_first_message_tag'
- server_send_second_message_tag = 'server_send_second_message_tag'
- server_complete_rpc_tag = 'server_complete_rpc_tag'
- server_call_due = set((
- server_send_initial_metadata_tag,
- server_send_first_message_tag,
- server_send_second_message_tag,
- server_complete_rpc_tag,
- ))
- server_call_completion_queue = cygrpc.CompletionQueue()
- server_call_driver = _QueueDriver(
- server_call_condition, server_call_completion_queue, server_call_due)
- server_call_driver.start()
-
- server_rpc_tag = 'server_rpc_tag'
- request_call_result = server.request_call(
- server_call_completion_queue, server_completion_queue, server_rpc_tag)
-
- client_call = channel.create_call(
- None, _EMPTY_FLAGS, client_completion_queue, b'/twinkies', None,
- _INFINITE_FUTURE)
- client_receive_initial_metadata_tag = 'client_receive_initial_metadata_tag'
- client_complete_rpc_tag = 'client_complete_rpc_tag'
- with client_condition:
- client_receive_initial_metadata_start_batch_result = (
- client_call.start_client_batch(cygrpc.Operations([
- cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
- ]), client_receive_initial_metadata_tag))
- client_due.add(client_receive_initial_metadata_tag)
- client_complete_rpc_start_batch_result = (
- client_call.start_client_batch(cygrpc.Operations([
- cygrpc.operation_send_initial_metadata(
- _EMPTY_METADATA, _EMPTY_FLAGS),
- cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
- cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
- ]), client_complete_rpc_tag))
- client_due.add(client_complete_rpc_tag)
-
- server_rpc_event = server_driver.first_event()
-
- with server_call_condition:
- server_send_initial_metadata_start_batch_result = (
- server_rpc_event.operation_call.start_server_batch([
- cygrpc.operation_send_initial_metadata(
- _EMPTY_METADATA, _EMPTY_FLAGS),
- ], server_send_initial_metadata_tag))
- server_send_first_message_start_batch_result = (
- server_rpc_event.operation_call.start_server_batch([
- cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS),
- ], server_send_first_message_tag))
- server_send_initial_metadata_event = server_call_driver.event_with_tag(
- server_send_initial_metadata_tag)
- server_send_first_message_event = server_call_driver.event_with_tag(
- server_send_first_message_tag)
- with server_call_condition:
- server_send_second_message_start_batch_result = (
- server_rpc_event.operation_call.start_server_batch([
- cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS),
- ], server_send_second_message_tag))
- server_complete_rpc_start_batch_result = (
- server_rpc_event.operation_call.start_server_batch([
- cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
- cygrpc.operation_send_status_from_server(
- cygrpc.Metadata(()), cygrpc.StatusCode.ok, b'test details',
- _EMPTY_FLAGS),
- ], server_complete_rpc_tag))
- server_send_second_message_event = server_call_driver.event_with_tag(
- server_send_second_message_tag)
- server_complete_rpc_event = server_call_driver.event_with_tag(
- server_complete_rpc_tag)
- server_call_driver.events()
-
- with client_condition:
- client_receive_first_message_tag = 'client_receive_first_message_tag'
- client_receive_first_message_start_batch_result = (
- client_call.start_client_batch(cygrpc.Operations([
- cygrpc.operation_receive_message(_EMPTY_FLAGS),
- ]), client_receive_first_message_tag))
- client_due.add(client_receive_first_message_tag)
- client_receive_first_message_event = client_driver.event_with_tag(
- client_receive_first_message_tag)
-
- client_call_cancel_result = client_call.cancel()
- client_driver.events()
-
- server.shutdown(server_completion_queue, server_shutdown_tag)
- server.cancel_all_calls()
- server_driver.events()
-
- self.assertEqual(cygrpc.CallError.ok, request_call_result)
- self.assertEqual(
- cygrpc.CallError.ok, server_send_initial_metadata_start_batch_result)
- self.assertEqual(
- cygrpc.CallError.ok, client_receive_initial_metadata_start_batch_result)
- self.assertEqual(
- cygrpc.CallError.ok, client_complete_rpc_start_batch_result)
- self.assertEqual(cygrpc.CallError.ok, client_call_cancel_result)
- self.assertIs(server_rpc_tag, server_rpc_event.tag)
- self.assertEqual(
- cygrpc.CompletionType.operation_complete, server_rpc_event.type)
- self.assertIsInstance(server_rpc_event.operation_call, cygrpc.Call)
- self.assertEqual(0, len(server_rpc_event.batch_operations))
+ def testReadSomeButNotAllResponses(self):
+ server_completion_queue = cygrpc.CompletionQueue()
+ server = cygrpc.Server(cygrpc.ChannelArgs([]))
+ server.register_completion_queue(server_completion_queue)
+ port = server.add_http2_port(b'[::]:0')
+ server.start()
+ channel = cygrpc.Channel('localhost:{}'.format(port).encode(),
+ cygrpc.ChannelArgs([]))
+
+ server_shutdown_tag = 'server_shutdown_tag'
+ server_driver = _ServerDriver(server_completion_queue,
+ server_shutdown_tag)
+ server_driver.start()
+
+ client_condition = threading.Condition()
+ client_due = set()
+ client_completion_queue = cygrpc.CompletionQueue()
+ client_driver = _QueueDriver(client_condition, client_completion_queue,
+ client_due)
+ client_driver.start()
+
+ server_call_condition = threading.Condition()
+ server_send_initial_metadata_tag = 'server_send_initial_metadata_tag'
+ server_send_first_message_tag = 'server_send_first_message_tag'
+ server_send_second_message_tag = 'server_send_second_message_tag'
+ server_complete_rpc_tag = 'server_complete_rpc_tag'
+ server_call_due = set((
+ server_send_initial_metadata_tag,
+ server_send_first_message_tag,
+ server_send_second_message_tag,
+ server_complete_rpc_tag,))
+ server_call_completion_queue = cygrpc.CompletionQueue()
+ server_call_driver = _QueueDriver(server_call_condition,
+ server_call_completion_queue,
+ server_call_due)
+ server_call_driver.start()
+
+ server_rpc_tag = 'server_rpc_tag'
+ request_call_result = server.request_call(server_call_completion_queue,
+ server_completion_queue,
+ server_rpc_tag)
+
+ client_call = channel.create_call(None, _EMPTY_FLAGS,
+ client_completion_queue, b'/twinkies',
+ None, _INFINITE_FUTURE)
+ client_receive_initial_metadata_tag = 'client_receive_initial_metadata_tag'
+ client_complete_rpc_tag = 'client_complete_rpc_tag'
+ with client_condition:
+ client_receive_initial_metadata_start_batch_result = (
+ client_call.start_client_batch(
+ cygrpc.Operations([
+ cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
+ ]), client_receive_initial_metadata_tag))
+ client_due.add(client_receive_initial_metadata_tag)
+ client_complete_rpc_start_batch_result = (
+ client_call.start_client_batch(
+ cygrpc.Operations([
+ cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
+ _EMPTY_FLAGS),
+ cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
+ ]), client_complete_rpc_tag))
+ client_due.add(client_complete_rpc_tag)
+
+ server_rpc_event = server_driver.first_event()
+
+ with server_call_condition:
+ server_send_initial_metadata_start_batch_result = (
+ server_rpc_event.operation_call.start_server_batch([
+ cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
+ _EMPTY_FLAGS),
+ ], server_send_initial_metadata_tag))
+ server_send_first_message_start_batch_result = (
+ server_rpc_event.operation_call.start_server_batch([
+ cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS),
+ ], server_send_first_message_tag))
+ server_send_initial_metadata_event = server_call_driver.event_with_tag(
+ server_send_initial_metadata_tag)
+ server_send_first_message_event = server_call_driver.event_with_tag(
+ server_send_first_message_tag)
+ with server_call_condition:
+ server_send_second_message_start_batch_result = (
+ server_rpc_event.operation_call.start_server_batch([
+ cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS),
+ ], server_send_second_message_tag))
+ server_complete_rpc_start_batch_result = (
+ server_rpc_event.operation_call.start_server_batch([
+ cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
+ cygrpc.operation_send_status_from_server(
+ cygrpc.Metadata(()), cygrpc.StatusCode.ok,
+ b'test details', _EMPTY_FLAGS),
+ ], server_complete_rpc_tag))
+ server_send_second_message_event = server_call_driver.event_with_tag(
+ server_send_second_message_tag)
+ server_complete_rpc_event = server_call_driver.event_with_tag(
+ server_complete_rpc_tag)
+ server_call_driver.events()
+
+ with client_condition:
+ client_receive_first_message_tag = 'client_receive_first_message_tag'
+ client_receive_first_message_start_batch_result = (
+ client_call.start_client_batch(
+ cygrpc.Operations([
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ ]), client_receive_first_message_tag))
+ client_due.add(client_receive_first_message_tag)
+ client_receive_first_message_event = client_driver.event_with_tag(
+ client_receive_first_message_tag)
+
+ client_call_cancel_result = client_call.cancel()
+ client_driver.events()
+
+ server.shutdown(server_completion_queue, server_shutdown_tag)
+ server.cancel_all_calls()
+ server_driver.events()
+
+ self.assertEqual(cygrpc.CallError.ok, request_call_result)
+ self.assertEqual(cygrpc.CallError.ok,
+ server_send_initial_metadata_start_batch_result)
+ self.assertEqual(cygrpc.CallError.ok,
+ client_receive_initial_metadata_start_batch_result)
+ self.assertEqual(cygrpc.CallError.ok,
+ client_complete_rpc_start_batch_result)
+ self.assertEqual(cygrpc.CallError.ok, client_call_cancel_result)
+ self.assertIs(server_rpc_tag, server_rpc_event.tag)
+ self.assertEqual(cygrpc.CompletionType.operation_complete,
+ server_rpc_event.type)
+ self.assertIsInstance(server_rpc_event.operation_call, cygrpc.Call)
+ self.assertEqual(0, len(server_rpc_event.batch_operations))
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
index 8dedebfabe..7aec316b95 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
@@ -37,399 +37,421 @@ from tests.unit._cython import test_utilities
from tests.unit import test_common
from tests.unit import resources
-
_SSL_HOST_OVERRIDE = b'foo.test.google.fr'
_CALL_CREDENTIALS_METADATA_KEY = 'call-creds-key'
_CALL_CREDENTIALS_METADATA_VALUE = 'call-creds-value'
_EMPTY_FLAGS = 0
+
def _metadata_plugin_callback(context, callback):
- callback(cygrpc.Metadata(
- [cygrpc.Metadatum(_CALL_CREDENTIALS_METADATA_KEY,
- _CALL_CREDENTIALS_METADATA_VALUE)]),
- cygrpc.StatusCode.ok, b'')
+ callback(
+ cygrpc.Metadata([
+ cygrpc.Metadatum(_CALL_CREDENTIALS_METADATA_KEY,
+ _CALL_CREDENTIALS_METADATA_VALUE)
+ ]), cygrpc.StatusCode.ok, b'')
class TypeSmokeTest(unittest.TestCase):
- def testStringsInUtilitiesUpDown(self):
- self.assertEqual(0, cygrpc.StatusCode.ok)
- metadatum = cygrpc.Metadatum(b'a', b'b')
- self.assertEqual(b'a', metadatum.key)
- self.assertEqual(b'b', metadatum.value)
- metadata = cygrpc.Metadata([metadatum])
- self.assertEqual(1, len(metadata))
- self.assertEqual(metadatum.key, metadata[0].key)
-
- def testMetadataIteration(self):
- metadata = cygrpc.Metadata([
- cygrpc.Metadatum(b'a', b'b'), cygrpc.Metadatum(b'c', b'd')])
- iterator = iter(metadata)
- metadatum = next(iterator)
- self.assertIsInstance(metadatum, cygrpc.Metadatum)
- self.assertEqual(metadatum.key, b'a')
- self.assertEqual(metadatum.value, b'b')
- metadatum = next(iterator)
- self.assertIsInstance(metadatum, cygrpc.Metadatum)
- self.assertEqual(metadatum.key, b'c')
- self.assertEqual(metadatum.value, b'd')
- with self.assertRaises(StopIteration):
- next(iterator)
-
- def testOperationsIteration(self):
- operations = cygrpc.Operations([
- cygrpc.operation_send_message(b'asdf', _EMPTY_FLAGS)])
- iterator = iter(operations)
- operation = next(iterator)
- self.assertIsInstance(operation, cygrpc.Operation)
- # `Operation`s are write-only structures; can't directly debug anything out
- # of them. Just check that we stop iterating.
- with self.assertRaises(StopIteration):
- next(iterator)
-
- def testOperationFlags(self):
- operation = cygrpc.operation_send_message(b'asdf',
- cygrpc.WriteFlag.no_compress)
- self.assertEqual(cygrpc.WriteFlag.no_compress, operation.flags)
-
- def testTimespec(self):
- now = time.time()
- timespec = cygrpc.Timespec(now)
- self.assertAlmostEqual(now, float(timespec), places=8)
-
- def testCompletionQueueUpDown(self):
- completion_queue = cygrpc.CompletionQueue()
- del completion_queue
-
- def testServerUpDown(self):
- server = cygrpc.Server(cygrpc.ChannelArgs([]))
- del server
-
- def testChannelUpDown(self):
- channel = cygrpc.Channel(b'[::]:0', cygrpc.ChannelArgs([]))
- del channel
-
- def testCredentialsMetadataPluginUpDown(self):
- plugin = cygrpc.CredentialsMetadataPlugin(
- lambda ignored_a, ignored_b: None, b'')
- del plugin
-
- def testCallCredentialsFromPluginUpDown(self):
- plugin = cygrpc.CredentialsMetadataPlugin(_metadata_plugin_callback, b'')
- call_credentials = cygrpc.call_credentials_metadata_plugin(plugin)
- del plugin
- del call_credentials
-
- def testServerStartNoExplicitShutdown(self):
- server = cygrpc.Server(cygrpc.ChannelArgs([]))
- completion_queue = cygrpc.CompletionQueue()
- server.register_completion_queue(completion_queue)
- port = server.add_http2_port(b'[::]:0')
- self.assertIsInstance(port, int)
- server.start()
- del server
-
- def testServerStartShutdown(self):
- completion_queue = cygrpc.CompletionQueue()
- server = cygrpc.Server(cygrpc.ChannelArgs([]))
- server.add_http2_port(b'[::]:0')
- server.register_completion_queue(completion_queue)
- server.start()
- shutdown_tag = object()
- server.shutdown(completion_queue, shutdown_tag)
- event = completion_queue.poll()
- self.assertEqual(cygrpc.CompletionType.operation_complete, event.type)
- self.assertIs(shutdown_tag, event.tag)
- del server
- del completion_queue
+ def testStringsInUtilitiesUpDown(self):
+ self.assertEqual(0, cygrpc.StatusCode.ok)
+ metadatum = cygrpc.Metadatum(b'a', b'b')
+ self.assertEqual(b'a', metadatum.key)
+ self.assertEqual(b'b', metadatum.value)
+ metadata = cygrpc.Metadata([metadatum])
+ self.assertEqual(1, len(metadata))
+ self.assertEqual(metadatum.key, metadata[0].key)
+
+ def testMetadataIteration(self):
+ metadata = cygrpc.Metadata(
+ [cygrpc.Metadatum(b'a', b'b'), cygrpc.Metadatum(b'c', b'd')])
+ iterator = iter(metadata)
+ metadatum = next(iterator)
+ self.assertIsInstance(metadatum, cygrpc.Metadatum)
+ self.assertEqual(metadatum.key, b'a')
+ self.assertEqual(metadatum.value, b'b')
+ metadatum = next(iterator)
+ self.assertIsInstance(metadatum, cygrpc.Metadatum)
+ self.assertEqual(metadatum.key, b'c')
+ self.assertEqual(metadatum.value, b'd')
+ with self.assertRaises(StopIteration):
+ next(iterator)
+
+ def testOperationsIteration(self):
+ operations = cygrpc.Operations(
+ [cygrpc.operation_send_message(b'asdf', _EMPTY_FLAGS)])
+ iterator = iter(operations)
+ operation = next(iterator)
+ self.assertIsInstance(operation, cygrpc.Operation)
+ # `Operation`s are write-only structures; can't directly debug anything out
+ # of them. Just check that we stop iterating.
+ with self.assertRaises(StopIteration):
+ next(iterator)
+
+ def testOperationFlags(self):
+ operation = cygrpc.operation_send_message(b'asdf',
+ cygrpc.WriteFlag.no_compress)
+ self.assertEqual(cygrpc.WriteFlag.no_compress, operation.flags)
+
+ def testTimespec(self):
+ now = time.time()
+ timespec = cygrpc.Timespec(now)
+ self.assertAlmostEqual(now, float(timespec), places=8)
+
+ def testCompletionQueueUpDown(self):
+ completion_queue = cygrpc.CompletionQueue()
+ del completion_queue
+
+ def testServerUpDown(self):
+ server = cygrpc.Server(cygrpc.ChannelArgs([]))
+ del server
+
+ def testChannelUpDown(self):
+ channel = cygrpc.Channel(b'[::]:0', cygrpc.ChannelArgs([]))
+ del channel
+
+ def testCredentialsMetadataPluginUpDown(self):
+ plugin = cygrpc.CredentialsMetadataPlugin(
+ lambda ignored_a, ignored_b: None, b'')
+ del plugin
+
+ def testCallCredentialsFromPluginUpDown(self):
+ plugin = cygrpc.CredentialsMetadataPlugin(_metadata_plugin_callback,
+ b'')
+ call_credentials = cygrpc.call_credentials_metadata_plugin(plugin)
+ del plugin
+ del call_credentials
+
+ def testServerStartNoExplicitShutdown(self):
+ server = cygrpc.Server(cygrpc.ChannelArgs([]))
+ completion_queue = cygrpc.CompletionQueue()
+ server.register_completion_queue(completion_queue)
+ port = server.add_http2_port(b'[::]:0')
+ self.assertIsInstance(port, int)
+ server.start()
+ del server
+
+ def testServerStartShutdown(self):
+ completion_queue = cygrpc.CompletionQueue()
+ server = cygrpc.Server(cygrpc.ChannelArgs([]))
+ server.add_http2_port(b'[::]:0')
+ server.register_completion_queue(completion_queue)
+ server.start()
+ shutdown_tag = object()
+ server.shutdown(completion_queue, shutdown_tag)
+ event = completion_queue.poll()
+ self.assertEqual(cygrpc.CompletionType.operation_complete, event.type)
+ self.assertIs(shutdown_tag, event.tag)
+ del server
+ del completion_queue
class ServerClientMixin(object):
- def setUpMixin(self, server_credentials, client_credentials, host_override):
- self.server_completion_queue = cygrpc.CompletionQueue()
- self.server = cygrpc.Server(cygrpc.ChannelArgs([]))
- self.server.register_completion_queue(self.server_completion_queue)
- if server_credentials:
- self.port = self.server.add_http2_port(b'[::]:0', server_credentials)
- else:
- self.port = self.server.add_http2_port(b'[::]:0')
- self.server.start()
- self.client_completion_queue = cygrpc.CompletionQueue()
- if client_credentials:
- client_channel_arguments = cygrpc.ChannelArgs([
- cygrpc.ChannelArg(cygrpc.ChannelArgKey.ssl_target_name_override,
- host_override)])
- self.client_channel = cygrpc.Channel(
- 'localhost:{}'.format(self.port).encode(), client_channel_arguments,
- client_credentials)
- else:
- self.client_channel = cygrpc.Channel(
- 'localhost:{}'.format(self.port).encode(), cygrpc.ChannelArgs([]))
- if host_override:
- self.host_argument = None # default host
- self.expected_host = host_override
- else:
- # arbitrary host name necessitating no further identification
- self.host_argument = b'hostess'
- self.expected_host = self.host_argument
-
- def tearDownMixin(self):
- del self.server
- del self.client_completion_queue
- del self.server_completion_queue
-
- def _perform_operations(self, operations, call, queue, deadline, description):
- """Perform the list of operations with given call, queue, and deadline.
+ def setUpMixin(self, server_credentials, client_credentials, host_override):
+ self.server_completion_queue = cygrpc.CompletionQueue()
+ self.server = cygrpc.Server(cygrpc.ChannelArgs([]))
+ self.server.register_completion_queue(self.server_completion_queue)
+ if server_credentials:
+ self.port = self.server.add_http2_port(b'[::]:0',
+ server_credentials)
+ else:
+ self.port = self.server.add_http2_port(b'[::]:0')
+ self.server.start()
+ self.client_completion_queue = cygrpc.CompletionQueue()
+ if client_credentials:
+ client_channel_arguments = cygrpc.ChannelArgs([
+ cygrpc.ChannelArg(cygrpc.ChannelArgKey.ssl_target_name_override,
+ host_override)
+ ])
+ self.client_channel = cygrpc.Channel(
+ 'localhost:{}'.format(self.port).encode(),
+ client_channel_arguments, client_credentials)
+ else:
+ self.client_channel = cygrpc.Channel(
+ 'localhost:{}'.format(self.port).encode(),
+ cygrpc.ChannelArgs([]))
+ if host_override:
+ self.host_argument = None # default host
+ self.expected_host = host_override
+ else:
+ # arbitrary host name necessitating no further identification
+ self.host_argument = b'hostess'
+ self.expected_host = self.host_argument
+
+ def tearDownMixin(self):
+ del self.server
+ del self.client_completion_queue
+ del self.server_completion_queue
+
+ def _perform_operations(self, operations, call, queue, deadline,
+ description):
+ """Perform the list of operations with given call, queue, and deadline.
Invocation errors are reported as an exception with `description` in
the message. Performs the operations asynchronously, returning a future.
"""
- def performer():
- tag = object()
- try:
- call_result = call.start_client_batch(
- cygrpc.Operations(operations), tag)
- self.assertEqual(cygrpc.CallError.ok, call_result)
- event = queue.poll(deadline)
- self.assertEqual(cygrpc.CompletionType.operation_complete, event.type)
- self.assertTrue(event.success)
- self.assertIs(tag, event.tag)
- except Exception as error:
- raise Exception("Error in '{}': {}".format(description, error.message))
- return event
- return test_utilities.SimpleFuture(performer)
-
- def testEcho(self):
- DEADLINE = time.time()+5
- DEADLINE_TOLERANCE = 0.25
- CLIENT_METADATA_ASCII_KEY = b'key'
- CLIENT_METADATA_ASCII_VALUE = b'val'
- CLIENT_METADATA_BIN_KEY = b'key-bin'
- CLIENT_METADATA_BIN_VALUE = b'\0'*1000
- SERVER_INITIAL_METADATA_KEY = b'init_me_me_me'
- SERVER_INITIAL_METADATA_VALUE = b'whodawha?'
- SERVER_TRAILING_METADATA_KEY = b'california_is_in_a_drought'
- SERVER_TRAILING_METADATA_VALUE = b'zomg it is'
- SERVER_STATUS_CODE = cygrpc.StatusCode.ok
- SERVER_STATUS_DETAILS = b'our work is never over'
- REQUEST = b'in death a member of project mayhem has a name'
- RESPONSE = b'his name is robert paulson'
- METHOD = b'twinkies'
-
- cygrpc_deadline = cygrpc.Timespec(DEADLINE)
-
- server_request_tag = object()
- request_call_result = self.server.request_call(
- self.server_completion_queue, self.server_completion_queue,
- server_request_tag)
-
- self.assertEqual(cygrpc.CallError.ok, request_call_result)
-
- client_call_tag = object()
- client_call = self.client_channel.create_call(
- None, 0, self.client_completion_queue, METHOD, self.host_argument,
- cygrpc_deadline)
- client_initial_metadata = cygrpc.Metadata([
- cygrpc.Metadatum(CLIENT_METADATA_ASCII_KEY,
- CLIENT_METADATA_ASCII_VALUE),
- cygrpc.Metadatum(CLIENT_METADATA_BIN_KEY, CLIENT_METADATA_BIN_VALUE)])
- client_start_batch_result = client_call.start_client_batch([
- cygrpc.operation_send_initial_metadata(client_initial_metadata,
- _EMPTY_FLAGS),
- cygrpc.operation_send_message(REQUEST, _EMPTY_FLAGS),
- cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
- cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
- cygrpc.operation_receive_message(_EMPTY_FLAGS),
- cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS)
- ], client_call_tag)
- self.assertEqual(cygrpc.CallError.ok, client_start_batch_result)
- client_event_future = test_utilities.CompletionQueuePollFuture(
- self.client_completion_queue, cygrpc_deadline)
-
- request_event = self.server_completion_queue.poll(cygrpc_deadline)
- self.assertEqual(cygrpc.CompletionType.operation_complete,
- request_event.type)
- self.assertIsInstance(request_event.operation_call, cygrpc.Call)
- self.assertIs(server_request_tag, request_event.tag)
- self.assertEqual(0, len(request_event.batch_operations))
- self.assertTrue(
- test_common.metadata_transmitted(client_initial_metadata,
- request_event.request_metadata))
- self.assertEqual(METHOD, request_event.request_call_details.method)
- self.assertEqual(self.expected_host,
- request_event.request_call_details.host)
- self.assertLess(
- abs(DEADLINE - float(request_event.request_call_details.deadline)),
- DEADLINE_TOLERANCE)
-
- server_call_tag = object()
- server_call = request_event.operation_call
- server_initial_metadata = cygrpc.Metadata([
- cygrpc.Metadatum(SERVER_INITIAL_METADATA_KEY,
- SERVER_INITIAL_METADATA_VALUE)])
- server_trailing_metadata = cygrpc.Metadata([
- cygrpc.Metadatum(SERVER_TRAILING_METADATA_KEY,
- SERVER_TRAILING_METADATA_VALUE)])
- server_start_batch_result = server_call.start_server_batch([
- cygrpc.operation_send_initial_metadata(server_initial_metadata,
- _EMPTY_FLAGS),
- cygrpc.operation_receive_message(_EMPTY_FLAGS),
- cygrpc.operation_send_message(RESPONSE, _EMPTY_FLAGS),
- cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
- cygrpc.operation_send_status_from_server(
- server_trailing_metadata, SERVER_STATUS_CODE,
- SERVER_STATUS_DETAILS, _EMPTY_FLAGS)
- ], server_call_tag)
- self.assertEqual(cygrpc.CallError.ok, server_start_batch_result)
-
- server_event = self.server_completion_queue.poll(cygrpc_deadline)
- client_event = client_event_future.result()
-
- self.assertEqual(6, len(client_event.batch_operations))
- found_client_op_types = set()
- for client_result in client_event.batch_operations:
- # we expect each op type to be unique
- self.assertNotIn(client_result.type, found_client_op_types)
- found_client_op_types.add(client_result.type)
- if client_result.type == cygrpc.OperationType.receive_initial_metadata:
- self.assertTrue(
- test_common.metadata_transmitted(server_initial_metadata,
- client_result.received_metadata))
- elif client_result.type == cygrpc.OperationType.receive_message:
- self.assertEqual(RESPONSE, client_result.received_message.bytes())
- elif client_result.type == cygrpc.OperationType.receive_status_on_client:
+
+ def performer():
+ tag = object()
+ try:
+ call_result = call.start_client_batch(
+ cygrpc.Operations(operations), tag)
+ self.assertEqual(cygrpc.CallError.ok, call_result)
+ event = queue.poll(deadline)
+ self.assertEqual(cygrpc.CompletionType.operation_complete,
+ event.type)
+ self.assertTrue(event.success)
+ self.assertIs(tag, event.tag)
+ except Exception as error:
+ raise Exception("Error in '{}': {}".format(description,
+ error.message))
+ return event
+
+ return test_utilities.SimpleFuture(performer)
+
+ def testEcho(self):
+ DEADLINE = time.time() + 5
+ DEADLINE_TOLERANCE = 0.25
+ CLIENT_METADATA_ASCII_KEY = b'key'
+ CLIENT_METADATA_ASCII_VALUE = b'val'
+ CLIENT_METADATA_BIN_KEY = b'key-bin'
+ CLIENT_METADATA_BIN_VALUE = b'\0' * 1000
+ SERVER_INITIAL_METADATA_KEY = b'init_me_me_me'
+ SERVER_INITIAL_METADATA_VALUE = b'whodawha?'
+ SERVER_TRAILING_METADATA_KEY = b'california_is_in_a_drought'
+ SERVER_TRAILING_METADATA_VALUE = b'zomg it is'
+ SERVER_STATUS_CODE = cygrpc.StatusCode.ok
+ SERVER_STATUS_DETAILS = b'our work is never over'
+ REQUEST = b'in death a member of project mayhem has a name'
+ RESPONSE = b'his name is robert paulson'
+ METHOD = b'twinkies'
+
+ cygrpc_deadline = cygrpc.Timespec(DEADLINE)
+
+ server_request_tag = object()
+ request_call_result = self.server.request_call(
+ self.server_completion_queue, self.server_completion_queue,
+ server_request_tag)
+
+ self.assertEqual(cygrpc.CallError.ok, request_call_result)
+
+ client_call_tag = object()
+ client_call = self.client_channel.create_call(
+ None, 0, self.client_completion_queue, METHOD, self.host_argument,
+ cygrpc_deadline)
+ client_initial_metadata = cygrpc.Metadata([
+ cygrpc.Metadatum(CLIENT_METADATA_ASCII_KEY,
+ CLIENT_METADATA_ASCII_VALUE),
+ cygrpc.Metadatum(CLIENT_METADATA_BIN_KEY, CLIENT_METADATA_BIN_VALUE)
+ ])
+ client_start_batch_result = client_call.start_client_batch([
+ cygrpc.operation_send_initial_metadata(client_initial_metadata,
+ _EMPTY_FLAGS),
+ cygrpc.operation_send_message(REQUEST, _EMPTY_FLAGS),
+ cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
+ cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS)
+ ], client_call_tag)
+ self.assertEqual(cygrpc.CallError.ok, client_start_batch_result)
+ client_event_future = test_utilities.CompletionQueuePollFuture(
+ self.client_completion_queue, cygrpc_deadline)
+
+ request_event = self.server_completion_queue.poll(cygrpc_deadline)
+ self.assertEqual(cygrpc.CompletionType.operation_complete,
+ request_event.type)
+ self.assertIsInstance(request_event.operation_call, cygrpc.Call)
+ self.assertIs(server_request_tag, request_event.tag)
+ self.assertEqual(0, len(request_event.batch_operations))
self.assertTrue(
- test_common.metadata_transmitted(server_trailing_metadata,
- client_result.received_metadata))
- self.assertEqual(SERVER_STATUS_DETAILS,
- client_result.received_status_details)
- self.assertEqual(SERVER_STATUS_CODE, client_result.received_status_code)
- self.assertEqual(set([
- cygrpc.OperationType.send_initial_metadata,
- cygrpc.OperationType.send_message,
- cygrpc.OperationType.send_close_from_client,
- cygrpc.OperationType.receive_initial_metadata,
- cygrpc.OperationType.receive_message,
- cygrpc.OperationType.receive_status_on_client
- ]), found_client_op_types)
-
- self.assertEqual(5, len(server_event.batch_operations))
- found_server_op_types = set()
- for server_result in server_event.batch_operations:
- self.assertNotIn(client_result.type, found_server_op_types)
- found_server_op_types.add(server_result.type)
- if server_result.type == cygrpc.OperationType.receive_message:
- self.assertEqual(REQUEST, server_result.received_message.bytes())
- elif server_result.type == cygrpc.OperationType.receive_close_on_server:
- self.assertFalse(server_result.received_cancelled)
- self.assertEqual(set([
- cygrpc.OperationType.send_initial_metadata,
- cygrpc.OperationType.receive_message,
- cygrpc.OperationType.send_message,
- cygrpc.OperationType.receive_close_on_server,
- cygrpc.OperationType.send_status_from_server
- ]), found_server_op_types)
-
- del client_call
- del server_call
-
- def test6522(self):
- DEADLINE = time.time()+5
- DEADLINE_TOLERANCE = 0.25
- METHOD = b'twinkies'
-
- cygrpc_deadline = cygrpc.Timespec(DEADLINE)
- empty_metadata = cygrpc.Metadata([])
-
- server_request_tag = object()
- self.server.request_call(
- self.server_completion_queue, self.server_completion_queue,
- server_request_tag)
- client_call = self.client_channel.create_call(
- None, 0, self.client_completion_queue, METHOD, self.host_argument,
- cygrpc_deadline)
-
- # Prologue
- def perform_client_operations(operations, description):
- return self._perform_operations(
- operations, client_call,
- self.client_completion_queue, cygrpc_deadline, description)
-
- client_event_future = perform_client_operations([
+ test_common.metadata_transmitted(client_initial_metadata,
+ request_event.request_metadata))
+ self.assertEqual(METHOD, request_event.request_call_details.method)
+ self.assertEqual(self.expected_host,
+ request_event.request_call_details.host)
+ self.assertLess(
+ abs(DEADLINE - float(request_event.request_call_details.deadline)),
+ DEADLINE_TOLERANCE)
+
+ server_call_tag = object()
+ server_call = request_event.operation_call
+ server_initial_metadata = cygrpc.Metadata([
+ cygrpc.Metadatum(SERVER_INITIAL_METADATA_KEY,
+ SERVER_INITIAL_METADATA_VALUE)
+ ])
+ server_trailing_metadata = cygrpc.Metadata([
+ cygrpc.Metadatum(SERVER_TRAILING_METADATA_KEY,
+ SERVER_TRAILING_METADATA_VALUE)
+ ])
+ server_start_batch_result = server_call.start_server_batch([
+ cygrpc.operation_send_initial_metadata(
+ server_initial_metadata,
+ _EMPTY_FLAGS), cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ cygrpc.operation_send_message(RESPONSE, _EMPTY_FLAGS),
+ cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
+ cygrpc.operation_send_status_from_server(
+ server_trailing_metadata, SERVER_STATUS_CODE,
+ SERVER_STATUS_DETAILS, _EMPTY_FLAGS)
+ ], server_call_tag)
+ self.assertEqual(cygrpc.CallError.ok, server_start_batch_result)
+
+ server_event = self.server_completion_queue.poll(cygrpc_deadline)
+ client_event = client_event_future.result()
+
+ self.assertEqual(6, len(client_event.batch_operations))
+ found_client_op_types = set()
+ for client_result in client_event.batch_operations:
+ # we expect each op type to be unique
+ self.assertNotIn(client_result.type, found_client_op_types)
+ found_client_op_types.add(client_result.type)
+ if client_result.type == cygrpc.OperationType.receive_initial_metadata:
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ server_initial_metadata,
+ client_result.received_metadata))
+ elif client_result.type == cygrpc.OperationType.receive_message:
+ self.assertEqual(RESPONSE,
+ client_result.received_message.bytes())
+ elif client_result.type == cygrpc.OperationType.receive_status_on_client:
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ server_trailing_metadata,
+ client_result.received_metadata))
+ self.assertEqual(SERVER_STATUS_DETAILS,
+ client_result.received_status_details)
+ self.assertEqual(SERVER_STATUS_CODE,
+ client_result.received_status_code)
+ self.assertEqual(
+ set([
+ cygrpc.OperationType.send_initial_metadata,
+ cygrpc.OperationType.send_message,
+ cygrpc.OperationType.send_close_from_client,
+ cygrpc.OperationType.receive_initial_metadata,
+ cygrpc.OperationType.receive_message,
+ cygrpc.OperationType.receive_status_on_client
+ ]), found_client_op_types)
+
+ self.assertEqual(5, len(server_event.batch_operations))
+ found_server_op_types = set()
+ for server_result in server_event.batch_operations:
+            self.assertNotIn(server_result.type, found_server_op_types)
+ found_server_op_types.add(server_result.type)
+ if server_result.type == cygrpc.OperationType.receive_message:
+ self.assertEqual(REQUEST,
+ server_result.received_message.bytes())
+ elif server_result.type == cygrpc.OperationType.receive_close_on_server:
+ self.assertFalse(server_result.received_cancelled)
+ self.assertEqual(
+ set([
+ cygrpc.OperationType.send_initial_metadata,
+ cygrpc.OperationType.receive_message,
+ cygrpc.OperationType.send_message,
+ cygrpc.OperationType.receive_close_on_server,
+ cygrpc.OperationType.send_status_from_server
+ ]), found_server_op_types)
+
+ del client_call
+ del server_call
+
+ def test6522(self):
+ DEADLINE = time.time() + 5
+ DEADLINE_TOLERANCE = 0.25
+ METHOD = b'twinkies'
+
+ cygrpc_deadline = cygrpc.Timespec(DEADLINE)
+ empty_metadata = cygrpc.Metadata([])
+
+ server_request_tag = object()
+ self.server.request_call(self.server_completion_queue,
+ self.server_completion_queue,
+ server_request_tag)
+ client_call = self.client_channel.create_call(
+ None, 0, self.client_completion_queue, METHOD, self.host_argument,
+ cygrpc_deadline)
+
+ # Prologue
+ def perform_client_operations(operations, description):
+ return self._perform_operations(operations, client_call,
+ self.client_completion_queue,
+ cygrpc_deadline, description)
+
+ client_event_future = perform_client_operations([
cygrpc.operation_send_initial_metadata(empty_metadata,
_EMPTY_FLAGS),
cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
], "Client prologue")
- request_event = self.server_completion_queue.poll(cygrpc_deadline)
- server_call = request_event.operation_call
+ request_event = self.server_completion_queue.poll(cygrpc_deadline)
+ server_call = request_event.operation_call
- def perform_server_operations(operations, description):
- return self._perform_operations(
- operations, server_call,
- self.server_completion_queue, cygrpc_deadline, description)
+ def perform_server_operations(operations, description):
+ return self._perform_operations(operations, server_call,
+ self.server_completion_queue,
+ cygrpc_deadline, description)
- server_event_future = perform_server_operations([
+ server_event_future = perform_server_operations([
cygrpc.operation_send_initial_metadata(empty_metadata,
_EMPTY_FLAGS),
], "Server prologue")
- client_event_future.result() # force completion
- server_event_future.result()
-
- # Messaging
- for _ in range(10):
- client_event_future = perform_client_operations([
- cygrpc.operation_send_message(b'', _EMPTY_FLAGS),
- cygrpc.operation_receive_message(_EMPTY_FLAGS),
- ], "Client message")
- server_event_future = perform_server_operations([
- cygrpc.operation_send_message(b'', _EMPTY_FLAGS),
- cygrpc.operation_receive_message(_EMPTY_FLAGS),
- ], "Server receive")
-
- client_event_future.result() # force completion
- server_event_future.result()
-
- # Epilogue
- client_event_future = perform_client_operations([
+ client_event_future.result() # force completion
+ server_event_future.result()
+
+ # Messaging
+ for _ in range(10):
+ client_event_future = perform_client_operations([
+ cygrpc.operation_send_message(b'', _EMPTY_FLAGS),
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ ], "Client message")
+ server_event_future = perform_server_operations([
+ cygrpc.operation_send_message(b'', _EMPTY_FLAGS),
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ ], "Server receive")
+
+ client_event_future.result() # force completion
+ server_event_future.result()
+
+ # Epilogue
+ client_event_future = perform_client_operations([
cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS)
], "Client epilogue")
- server_event_future = perform_server_operations([
+ server_event_future = perform_server_operations([
cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
empty_metadata, cygrpc.StatusCode.ok, b'', _EMPTY_FLAGS)
], "Server epilogue")
- client_event_future.result() # force completion
- server_event_future.result()
+ client_event_future.result() # force completion
+ server_event_future.result()
class InsecureServerInsecureClient(unittest.TestCase, ServerClientMixin):
- def setUp(self):
- self.setUpMixin(None, None, None)
+ def setUp(self):
+ self.setUpMixin(None, None, None)
- def tearDown(self):
- self.tearDownMixin()
+ def tearDown(self):
+ self.tearDownMixin()
class SecureServerSecureClient(unittest.TestCase, ServerClientMixin):
- def setUp(self):
- server_credentials = cygrpc.server_credentials_ssl(
- None, [cygrpc.SslPemKeyCertPair(resources.private_key(),
- resources.certificate_chain())], False)
- client_credentials = cygrpc.channel_credentials_ssl(
- resources.test_root_certificates(), None)
- self.setUpMixin(server_credentials, client_credentials, _SSL_HOST_OVERRIDE)
+ def setUp(self):
+ server_credentials = cygrpc.server_credentials_ssl(None, [
+ cygrpc.SslPemKeyCertPair(resources.private_key(),
+ resources.certificate_chain())
+ ], False)
+ client_credentials = cygrpc.channel_credentials_ssl(
+ resources.test_root_certificates(), None)
+ self.setUpMixin(server_credentials, client_credentials,
+ _SSL_HOST_OVERRIDE)
- def tearDown(self):
- self.tearDownMixin()
+ def tearDown(self):
+ self.tearDownMixin()
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_cython/test_utilities.py b/src/python/grpcio_tests/tests/unit/_cython/test_utilities.py
index 6280ce74c4..dffb3733b6 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/test_utilities.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/test_utilities.py
@@ -33,34 +33,35 @@ from grpc._cython import cygrpc
class SimpleFuture(object):
- """A simple future mechanism."""
+ """A simple future mechanism."""
- def __init__(self, function, *args, **kwargs):
- def wrapped_function():
- try:
- self._result = function(*args, **kwargs)
- except Exception as error:
- self._error = error
- self._result = None
- self._error = None
- self._thread = threading.Thread(target=wrapped_function)
- self._thread.start()
+ def __init__(self, function, *args, **kwargs):
- def result(self):
- """The resulting value of this future.
+ def wrapped_function():
+ try:
+ self._result = function(*args, **kwargs)
+ except Exception as error:
+ self._error = error
+
+ self._result = None
+ self._error = None
+ self._thread = threading.Thread(target=wrapped_function)
+ self._thread.start()
+
+ def result(self):
+ """The resulting value of this future.
Re-raises any exceptions.
"""
- self._thread.join()
- if self._error:
- # TODO(atash): re-raise exceptions in a way that preserves tracebacks
- raise self._error
- return self._result
+ self._thread.join()
+ if self._error:
+ # TODO(atash): re-raise exceptions in a way that preserves tracebacks
+ raise self._error
+ return self._result
class CompletionQueuePollFuture(SimpleFuture):
- def __init__(self, completion_queue, deadline):
- super(CompletionQueuePollFuture, self).__init__(
- lambda: completion_queue.poll(deadline))
-
+ def __init__(self, completion_queue, deadline):
+ super(CompletionQueuePollFuture,
+ self).__init__(lambda: completion_queue.poll(deadline))
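
For reference, a minimal usage sketch of the SimpleFuture helper defined above: run a callable on a worker thread, then join on result(), which re-raises any exception the callable raised. The slow_add function and the import path are illustrative assumptions, not part of this patch.

    import time

    from tests.unit._cython import test_utilities  # assumed import path


    def slow_add(a, b):
        time.sleep(0.1)  # simulate blocking work on the worker thread
        return a + b


    future = test_utilities.SimpleFuture(slow_add, 2, 3)
    # result() joins the worker thread and re-raises any captured exception.
    assert future.result() == 5
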
diff --git a/src/python/grpcio_tests/tests/unit/_empty_message_test.py b/src/python/grpcio_tests/tests/unit/_empty_message_test.py
index 69f4689279..4588688ea6 100644
--- a/src/python/grpcio_tests/tests/unit/_empty_message_test.py
+++ b/src/python/grpcio_tests/tests/unit/_empty_message_test.py
@@ -44,95 +44,94 @@ _STREAM_STREAM = '/test/StreamStream'
def handle_unary_unary(request, servicer_context):
- return _RESPONSE
+ return _RESPONSE
def handle_unary_stream(request, servicer_context):
- for _ in range(test_constants.STREAM_LENGTH):
- yield _RESPONSE
+ for _ in range(test_constants.STREAM_LENGTH):
+ yield _RESPONSE
def handle_stream_unary(request_iterator, servicer_context):
- for request in request_iterator:
- pass
- return _RESPONSE
+ for request in request_iterator:
+ pass
+ return _RESPONSE
def handle_stream_stream(request_iterator, servicer_context):
- for request in request_iterator:
- yield _RESPONSE
+ for request in request_iterator:
+ yield _RESPONSE
class _MethodHandler(grpc.RpcMethodHandler):
- def __init__(self, request_streaming, response_streaming):
- self.request_streaming = request_streaming
- self.response_streaming = response_streaming
- self.request_deserializer = None
- self.response_serializer = None
- self.unary_unary = None
- self.unary_stream = None
- self.stream_unary = None
- self.stream_stream = None
- if self.request_streaming and self.response_streaming:
- self.stream_stream = handle_stream_stream
- elif self.request_streaming:
- self.stream_unary = handle_stream_unary
- elif self.response_streaming:
- self.unary_stream = handle_unary_stream
- else:
- self.unary_unary = handle_unary_unary
+ def __init__(self, request_streaming, response_streaming):
+ self.request_streaming = request_streaming
+ self.response_streaming = response_streaming
+ self.request_deserializer = None
+ self.response_serializer = None
+ self.unary_unary = None
+ self.unary_stream = None
+ self.stream_unary = None
+ self.stream_stream = None
+ if self.request_streaming and self.response_streaming:
+ self.stream_stream = handle_stream_stream
+ elif self.request_streaming:
+ self.stream_unary = handle_stream_unary
+ elif self.response_streaming:
+ self.unary_stream = handle_unary_stream
+ else:
+ self.unary_unary = handle_unary_unary
class _GenericHandler(grpc.GenericRpcHandler):
- def service(self, handler_call_details):
- if handler_call_details.method == _UNARY_UNARY:
- return _MethodHandler(False, False)
- elif handler_call_details.method == _UNARY_STREAM:
- return _MethodHandler(False, True)
- elif handler_call_details.method == _STREAM_UNARY:
- return _MethodHandler(True, False)
- elif handler_call_details.method == _STREAM_STREAM:
- return _MethodHandler(True, True)
- else:
- return None
+ def service(self, handler_call_details):
+ if handler_call_details.method == _UNARY_UNARY:
+ return _MethodHandler(False, False)
+ elif handler_call_details.method == _UNARY_STREAM:
+ return _MethodHandler(False, True)
+ elif handler_call_details.method == _STREAM_UNARY:
+ return _MethodHandler(True, False)
+ elif handler_call_details.method == _STREAM_STREAM:
+ return _MethodHandler(True, True)
+ else:
+ return None
class EmptyMessageTest(unittest.TestCase):
- def setUp(self):
- self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- self._server = grpc.server(
- self._server_pool, handlers=(_GenericHandler(),))
- port = self._server.add_insecure_port('[::]:0')
- self._server.start()
- self._channel = grpc.insecure_channel('localhost:%d' % port)
+ def setUp(self):
+ self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ self._server = grpc.server(
+ self._server_pool, handlers=(_GenericHandler(),))
+ port = self._server.add_insecure_port('[::]:0')
+ self._server.start()
+ self._channel = grpc.insecure_channel('localhost:%d' % port)
- def tearDown(self):
- self._server.stop(0)
+ def tearDown(self):
+ self._server.stop(0)
- def testUnaryUnary(self):
- response = self._channel.unary_unary(_UNARY_UNARY)(_REQUEST)
- self.assertEqual(_RESPONSE, response)
+ def testUnaryUnary(self):
+ response = self._channel.unary_unary(_UNARY_UNARY)(_REQUEST)
+ self.assertEqual(_RESPONSE, response)
- def testUnaryStream(self):
- response_iterator = self._channel.unary_stream(_UNARY_STREAM)(_REQUEST)
- self.assertSequenceEqual(
- [_RESPONSE] * test_constants.STREAM_LENGTH, list(response_iterator))
+ def testUnaryStream(self):
+ response_iterator = self._channel.unary_stream(_UNARY_STREAM)(_REQUEST)
+ self.assertSequenceEqual([_RESPONSE] * test_constants.STREAM_LENGTH,
+ list(response_iterator))
- def testStreamUnary(self):
- response = self._channel.stream_unary(_STREAM_UNARY)(
- iter([_REQUEST] * test_constants.STREAM_LENGTH))
- self.assertEqual(_RESPONSE, response)
+ def testStreamUnary(self):
+ response = self._channel.stream_unary(_STREAM_UNARY)(iter(
+ [_REQUEST] * test_constants.STREAM_LENGTH))
+ self.assertEqual(_RESPONSE, response)
- def testStreamStream(self):
- response_iterator = self._channel.stream_stream(_STREAM_STREAM)(
- iter([_REQUEST] * test_constants.STREAM_LENGTH))
- self.assertSequenceEqual(
- [_RESPONSE] * test_constants.STREAM_LENGTH, list(response_iterator))
+ def testStreamStream(self):
+ response_iterator = self._channel.stream_stream(_STREAM_STREAM)(iter(
+ [_REQUEST] * test_constants.STREAM_LENGTH))
+ self.assertSequenceEqual([_RESPONSE] * test_constants.STREAM_LENGTH,
+ list(response_iterator))
if __name__ == '__main__':
- unittest.main(verbosity=2)
-
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
index 777527137f..22a6643848 100644
--- a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
+++ b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Defines a number of module-scope gRPC scenarios to test clean exit."""
import argparse
@@ -73,88 +72,88 @@ TEST_TO_METHOD = {
def hang_unary_unary(request, servicer_context):
- time.sleep(WAIT_TIME)
+ time.sleep(WAIT_TIME)
def hang_unary_stream(request, servicer_context):
- time.sleep(WAIT_TIME)
+ time.sleep(WAIT_TIME)
def hang_partial_unary_stream(request, servicer_context):
- for _ in range(test_constants.STREAM_LENGTH // 2):
- yield request
- time.sleep(WAIT_TIME)
+ for _ in range(test_constants.STREAM_LENGTH // 2):
+ yield request
+ time.sleep(WAIT_TIME)
def hang_stream_unary(request_iterator, servicer_context):
- time.sleep(WAIT_TIME)
+ time.sleep(WAIT_TIME)
def hang_partial_stream_unary(request_iterator, servicer_context):
- for _ in range(test_constants.STREAM_LENGTH // 2):
- next(request_iterator)
- time.sleep(WAIT_TIME)
+ for _ in range(test_constants.STREAM_LENGTH // 2):
+ next(request_iterator)
+ time.sleep(WAIT_TIME)
def hang_stream_stream(request_iterator, servicer_context):
- time.sleep(WAIT_TIME)
+ time.sleep(WAIT_TIME)
def hang_partial_stream_stream(request_iterator, servicer_context):
- for _ in range(test_constants.STREAM_LENGTH // 2):
- yield next(request_iterator)
- time.sleep(WAIT_TIME)
+ for _ in range(test_constants.STREAM_LENGTH // 2):
+ yield next(request_iterator)
+ time.sleep(WAIT_TIME)
class MethodHandler(grpc.RpcMethodHandler):
- def __init__(self, request_streaming, response_streaming, partial_hang):
- self.request_streaming = request_streaming
- self.response_streaming = response_streaming
- self.request_deserializer = None
- self.response_serializer = None
- self.unary_unary = None
- self.unary_stream = None
- self.stream_unary = None
- self.stream_stream = None
- if self.request_streaming and self.response_streaming:
- if partial_hang:
- self.stream_stream = hang_partial_stream_stream
- else:
- self.stream_stream = hang_stream_stream
- elif self.request_streaming:
- if partial_hang:
- self.stream_unary = hang_partial_stream_unary
- else:
- self.stream_unary = hang_stream_unary
- elif self.response_streaming:
- if partial_hang:
- self.unary_stream = hang_partial_unary_stream
- else:
- self.unary_stream = hang_unary_stream
- else:
- self.unary_unary = hang_unary_unary
+ def __init__(self, request_streaming, response_streaming, partial_hang):
+ self.request_streaming = request_streaming
+ self.response_streaming = response_streaming
+ self.request_deserializer = None
+ self.response_serializer = None
+ self.unary_unary = None
+ self.unary_stream = None
+ self.stream_unary = None
+ self.stream_stream = None
+ if self.request_streaming and self.response_streaming:
+ if partial_hang:
+ self.stream_stream = hang_partial_stream_stream
+ else:
+ self.stream_stream = hang_stream_stream
+ elif self.request_streaming:
+ if partial_hang:
+ self.stream_unary = hang_partial_stream_unary
+ else:
+ self.stream_unary = hang_stream_unary
+ elif self.response_streaming:
+ if partial_hang:
+ self.unary_stream = hang_partial_unary_stream
+ else:
+ self.unary_stream = hang_unary_stream
+ else:
+ self.unary_unary = hang_unary_unary
class GenericHandler(grpc.GenericRpcHandler):
- def service(self, handler_call_details):
- if handler_call_details.method == UNARY_UNARY:
- return MethodHandler(False, False, False)
- elif handler_call_details.method == UNARY_STREAM:
- return MethodHandler(False, True, False)
- elif handler_call_details.method == STREAM_UNARY:
- return MethodHandler(True, False, False)
- elif handler_call_details.method == STREAM_STREAM:
- return MethodHandler(True, True, False)
- elif handler_call_details.method == PARTIAL_UNARY_STREAM:
- return MethodHandler(False, True, True)
- elif handler_call_details.method == PARTIAL_STREAM_UNARY:
- return MethodHandler(True, False, True)
- elif handler_call_details.method == PARTIAL_STREAM_STREAM:
- return MethodHandler(True, True, True)
- else:
- return None
+ def service(self, handler_call_details):
+ if handler_call_details.method == UNARY_UNARY:
+ return MethodHandler(False, False, False)
+ elif handler_call_details.method == UNARY_STREAM:
+ return MethodHandler(False, True, False)
+ elif handler_call_details.method == STREAM_UNARY:
+ return MethodHandler(True, False, False)
+ elif handler_call_details.method == STREAM_STREAM:
+ return MethodHandler(True, True, False)
+ elif handler_call_details.method == PARTIAL_UNARY_STREAM:
+ return MethodHandler(False, True, True)
+ elif handler_call_details.method == PARTIAL_STREAM_UNARY:
+ return MethodHandler(True, False, True)
+ elif handler_call_details.method == PARTIAL_STREAM_STREAM:
+ return MethodHandler(True, True, True)
+ else:
+ return None
# Traditional executors will not exit until all their
@@ -162,88 +161,88 @@ class GenericHandler(grpc.GenericRpcHandler):
# never finish, we don't want to block exit on these jobs.
class DaemonPool(object):
- def submit(self, fn, *args, **kwargs):
- thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
- thread.daemon = True
- thread.start()
+ def submit(self, fn, *args, **kwargs):
+ thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
+ thread.daemon = True
+ thread.start()
- def shutdown(self, wait=True):
- pass
+ def shutdown(self, wait=True):
+ pass
def infinite_request_iterator():
- while True:
- yield REQUEST
+ while True:
+ yield REQUEST
if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('scenario', type=str)
- parser.add_argument(
- '--wait_for_interrupt', dest='wait_for_interrupt', action='store_true')
- args = parser.parse_args()
-
- if args.scenario == UNSTARTED_SERVER:
- server = grpc.server(DaemonPool())
- if args.wait_for_interrupt:
- time.sleep(WAIT_TIME)
- elif args.scenario == RUNNING_SERVER:
- server = grpc.server(DaemonPool())
- port = server.add_insecure_port('[::]:0')
- server.start()
- if args.wait_for_interrupt:
- time.sleep(WAIT_TIME)
- elif args.scenario == POLL_CONNECTIVITY_NO_SERVER:
- channel = grpc.insecure_channel('localhost:12345')
-
- def connectivity_callback(connectivity):
- pass
-
- channel.subscribe(connectivity_callback, try_to_connect=True)
- if args.wait_for_interrupt:
- time.sleep(WAIT_TIME)
- elif args.scenario == POLL_CONNECTIVITY:
- server = grpc.server(DaemonPool())
- port = server.add_insecure_port('[::]:0')
- server.start()
- channel = grpc.insecure_channel('localhost:%d' % port)
-
- def connectivity_callback(connectivity):
- pass
-
- channel.subscribe(connectivity_callback, try_to_connect=True)
- if args.wait_for_interrupt:
- time.sleep(WAIT_TIME)
-
- else:
- handler = GenericHandler()
- server = grpc.server(DaemonPool())
- port = server.add_insecure_port('[::]:0')
- server.add_generic_rpc_handlers((handler,))
- server.start()
- channel = grpc.insecure_channel('localhost:%d' % port)
-
- method = TEST_TO_METHOD[args.scenario]
-
- if args.scenario == IN_FLIGHT_UNARY_UNARY_CALL:
- multi_callable = channel.unary_unary(method)
- future = multi_callable.future(REQUEST)
- result, call = multi_callable.with_call(REQUEST)
- elif (args.scenario == IN_FLIGHT_UNARY_STREAM_CALL or
- args.scenario == IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL):
- multi_callable = channel.unary_stream(method)
- response_iterator = multi_callable(REQUEST)
- for response in response_iterator:
- pass
- elif (args.scenario == IN_FLIGHT_STREAM_UNARY_CALL or
- args.scenario == IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL):
- multi_callable = channel.stream_unary(method)
- future = multi_callable.future(infinite_request_iterator())
- result, call = multi_callable.with_call(
- iter([REQUEST] * test_constants.STREAM_LENGTH))
- elif (args.scenario == IN_FLIGHT_STREAM_STREAM_CALL or
- args.scenario == IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL):
- multi_callable = channel.stream_stream(method)
- response_iterator = multi_callable(infinite_request_iterator())
- for response in response_iterator:
- pass
+ parser = argparse.ArgumentParser()
+ parser.add_argument('scenario', type=str)
+ parser.add_argument(
+ '--wait_for_interrupt', dest='wait_for_interrupt', action='store_true')
+ args = parser.parse_args()
+
+ if args.scenario == UNSTARTED_SERVER:
+ server = grpc.server(DaemonPool())
+ if args.wait_for_interrupt:
+ time.sleep(WAIT_TIME)
+ elif args.scenario == RUNNING_SERVER:
+ server = grpc.server(DaemonPool())
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ if args.wait_for_interrupt:
+ time.sleep(WAIT_TIME)
+ elif args.scenario == POLL_CONNECTIVITY_NO_SERVER:
+ channel = grpc.insecure_channel('localhost:12345')
+
+ def connectivity_callback(connectivity):
+ pass
+
+ channel.subscribe(connectivity_callback, try_to_connect=True)
+ if args.wait_for_interrupt:
+ time.sleep(WAIT_TIME)
+ elif args.scenario == POLL_CONNECTIVITY:
+ server = grpc.server(DaemonPool())
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ channel = grpc.insecure_channel('localhost:%d' % port)
+
+ def connectivity_callback(connectivity):
+ pass
+
+ channel.subscribe(connectivity_callback, try_to_connect=True)
+ if args.wait_for_interrupt:
+ time.sleep(WAIT_TIME)
+
+ else:
+ handler = GenericHandler()
+ server = grpc.server(DaemonPool())
+ port = server.add_insecure_port('[::]:0')
+ server.add_generic_rpc_handlers((handler,))
+ server.start()
+ channel = grpc.insecure_channel('localhost:%d' % port)
+
+ method = TEST_TO_METHOD[args.scenario]
+
+ if args.scenario == IN_FLIGHT_UNARY_UNARY_CALL:
+ multi_callable = channel.unary_unary(method)
+ future = multi_callable.future(REQUEST)
+ result, call = multi_callable.with_call(REQUEST)
+ elif (args.scenario == IN_FLIGHT_UNARY_STREAM_CALL or
+ args.scenario == IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL):
+ multi_callable = channel.unary_stream(method)
+ response_iterator = multi_callable(REQUEST)
+ for response in response_iterator:
+ pass
+ elif (args.scenario == IN_FLIGHT_STREAM_UNARY_CALL or
+ args.scenario == IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL):
+ multi_callable = channel.stream_unary(method)
+ future = multi_callable.future(infinite_request_iterator())
+ result, call = multi_callable.with_call(
+ iter([REQUEST] * test_constants.STREAM_LENGTH))
+ elif (args.scenario == IN_FLIGHT_STREAM_STREAM_CALL or
+ args.scenario == IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL):
+ multi_callable = channel.stream_stream(method)
+ response_iterator = multi_callable(infinite_request_iterator())
+ for response in response_iterator:
+ pass
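
The DaemonPool defined above exists so that these scenarios never block interpreter exit on handler threads that sleep forever: daemon threads are simply abandoned when the process exits. A standalone sketch of the same pattern (TinyDaemonPool and the example job are illustrative names, not part of this patch):

    import threading
    import time


    class TinyDaemonPool(object):

        def submit(self, fn, *args, **kwargs):
            thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
            thread.daemon = True  # daemon threads do not keep the process alive
            thread.start()

        def shutdown(self, wait=True):
            pass


    if __name__ == '__main__':
        pool = TinyDaemonPool()
        pool.submit(time.sleep, 3600)  # a job that would otherwise hang forever
        print('exiting immediately; the daemon thread is abandoned')
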
diff --git a/src/python/grpcio_tests/tests/unit/_exit_test.py b/src/python/grpcio_tests/tests/unit/_exit_test.py
index 5a4a32887c..b99605dcb8 100644
--- a/src/python/grpcio_tests/tests/unit/_exit_test.py
+++ b/src/python/grpcio_tests/tests/unit/_exit_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests clean exit of server/client on Python Interpreter exit/sigint.
The tests in this module spawn a subprocess for each test case, the
@@ -45,15 +44,15 @@ import unittest
from tests.unit import _exit_scenarios
-SCENARIO_FILE = os.path.abspath(os.path.join(
- os.path.dirname(os.path.realpath(__file__)), '_exit_scenarios.py'))
+SCENARIO_FILE = os.path.abspath(
+ os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), '_exit_scenarios.py'))
INTERPRETER = sys.executable
BASE_COMMAND = [INTERPRETER, SCENARIO_FILE]
BASE_SIGTERM_COMMAND = BASE_COMMAND + ['--wait_for_interrupt']
INIT_TIME = 1.0
-
processes = []
process_lock = threading.Lock()
@@ -61,126 +60,146 @@ process_lock = threading.Lock()
# Make sure we attempt to clean up any
# processes we may have left running
def cleanup_processes():
- with process_lock:
- for process in processes:
- try:
- process.kill()
- except Exception:
- pass
+ with process_lock:
+ for process in processes:
+ try:
+ process.kill()
+ except Exception:
+ pass
+
+
atexit.register(cleanup_processes)
def interrupt_and_wait(process):
- with process_lock:
- processes.append(process)
- time.sleep(INIT_TIME)
- os.kill(process.pid, signal.SIGINT)
- process.wait()
+ with process_lock:
+ processes.append(process)
+ time.sleep(INIT_TIME)
+ os.kill(process.pid, signal.SIGINT)
+ process.wait()
def wait(process):
- with process_lock:
- processes.append(process)
- process.wait()
+ with process_lock:
+ processes.append(process)
+ process.wait()
@unittest.skip('https://github.com/grpc/grpc/issues/7311')
class ExitTest(unittest.TestCase):
- def test_unstarted_server(self):
- process = subprocess.Popen(
- BASE_COMMAND + [_exit_scenarios.UNSTARTED_SERVER],
- stdout=sys.stdout, stderr=sys.stderr)
- wait(process)
-
- def test_unstarted_server_terminate(self):
- process = subprocess.Popen(
- BASE_SIGTERM_COMMAND + [_exit_scenarios.UNSTARTED_SERVER],
- stdout=sys.stdout)
- interrupt_and_wait(process)
-
- def test_running_server(self):
- process = subprocess.Popen(
- BASE_COMMAND + [_exit_scenarios.RUNNING_SERVER],
- stdout=sys.stdout, stderr=sys.stderr)
- wait(process)
-
- def test_running_server_terminate(self):
- process = subprocess.Popen(
- BASE_SIGTERM_COMMAND + [_exit_scenarios.RUNNING_SERVER],
- stdout=sys.stdout, stderr=sys.stderr)
- interrupt_and_wait(process)
-
- def test_poll_connectivity_no_server(self):
- process = subprocess.Popen(
- BASE_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY_NO_SERVER],
- stdout=sys.stdout, stderr=sys.stderr)
- wait(process)
-
- def test_poll_connectivity_no_server_terminate(self):
- process = subprocess.Popen(
- BASE_SIGTERM_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY_NO_SERVER],
- stdout=sys.stdout, stderr=sys.stderr)
- interrupt_and_wait(process)
-
- def test_poll_connectivity(self):
- process = subprocess.Popen(
- BASE_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY],
- stdout=sys.stdout, stderr=sys.stderr)
- wait(process)
-
- def test_poll_connectivity_terminate(self):
- process = subprocess.Popen(
- BASE_SIGTERM_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY],
- stdout=sys.stdout, stderr=sys.stderr)
- interrupt_and_wait(process)
-
- def test_in_flight_unary_unary_call(self):
- process = subprocess.Popen(
- BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_UNARY_UNARY_CALL],
- stdout=sys.stdout, stderr=sys.stderr)
- interrupt_and_wait(process)
-
- @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999')
- def test_in_flight_unary_stream_call(self):
- process = subprocess.Popen(
- BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_UNARY_STREAM_CALL],
- stdout=sys.stdout, stderr=sys.stderr)
- interrupt_and_wait(process)
-
- def test_in_flight_stream_unary_call(self):
- process = subprocess.Popen(
- BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_STREAM_UNARY_CALL],
- stdout=sys.stdout, stderr=sys.stderr)
- interrupt_and_wait(process)
-
- @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999')
- def test_in_flight_stream_stream_call(self):
- process = subprocess.Popen(
- BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_STREAM_STREAM_CALL],
- stdout=sys.stdout, stderr=sys.stderr)
- interrupt_and_wait(process)
-
- @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999')
- def test_in_flight_partial_unary_stream_call(self):
- process = subprocess.Popen(
- BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL],
- stdout=sys.stdout, stderr=sys.stderr)
- interrupt_and_wait(process)
-
- def test_in_flight_partial_stream_unary_call(self):
- process = subprocess.Popen(
- BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL],
- stdout=sys.stdout, stderr=sys.stderr)
- interrupt_and_wait(process)
-
- @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999')
- def test_in_flight_partial_stream_stream_call(self):
- process = subprocess.Popen(
- BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL],
- stdout=sys.stdout, stderr=sys.stderr)
- interrupt_and_wait(process)
+ def test_unstarted_server(self):
+ process = subprocess.Popen(
+ BASE_COMMAND + [_exit_scenarios.UNSTARTED_SERVER],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ wait(process)
+
+ def test_unstarted_server_terminate(self):
+ process = subprocess.Popen(
+ BASE_SIGTERM_COMMAND + [_exit_scenarios.UNSTARTED_SERVER],
+ stdout=sys.stdout)
+ interrupt_and_wait(process)
+
+ def test_running_server(self):
+ process = subprocess.Popen(
+ BASE_COMMAND + [_exit_scenarios.RUNNING_SERVER],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ wait(process)
+
+ def test_running_server_terminate(self):
+ process = subprocess.Popen(
+ BASE_SIGTERM_COMMAND + [_exit_scenarios.RUNNING_SERVER],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ interrupt_and_wait(process)
+
+ def test_poll_connectivity_no_server(self):
+ process = subprocess.Popen(
+ BASE_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY_NO_SERVER],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ wait(process)
+
+ def test_poll_connectivity_no_server_terminate(self):
+ process = subprocess.Popen(
+ BASE_SIGTERM_COMMAND +
+ [_exit_scenarios.POLL_CONNECTIVITY_NO_SERVER],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ interrupt_and_wait(process)
+
+ def test_poll_connectivity(self):
+ process = subprocess.Popen(
+ BASE_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ wait(process)
+
+ def test_poll_connectivity_terminate(self):
+ process = subprocess.Popen(
+ BASE_SIGTERM_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ interrupt_and_wait(process)
+
+ def test_in_flight_unary_unary_call(self):
+ process = subprocess.Popen(
+ BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_UNARY_UNARY_CALL],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ interrupt_and_wait(process)
+
+ @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999')
+ def test_in_flight_unary_stream_call(self):
+ process = subprocess.Popen(
+ BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_UNARY_STREAM_CALL],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ interrupt_and_wait(process)
+
+ def test_in_flight_stream_unary_call(self):
+ process = subprocess.Popen(
+ BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_STREAM_UNARY_CALL],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ interrupt_and_wait(process)
+
+ @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999')
+ def test_in_flight_stream_stream_call(self):
+ process = subprocess.Popen(
+ BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_STREAM_STREAM_CALL],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ interrupt_and_wait(process)
+
+ @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999')
+ def test_in_flight_partial_unary_stream_call(self):
+ process = subprocess.Popen(
+ BASE_COMMAND +
+ [_exit_scenarios.IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ interrupt_and_wait(process)
+
+ def test_in_flight_partial_stream_unary_call(self):
+ process = subprocess.Popen(
+ BASE_COMMAND +
+ [_exit_scenarios.IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ interrupt_and_wait(process)
+
+ @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999')
+ def test_in_flight_partial_stream_stream_call(self):
+ process = subprocess.Popen(
+ BASE_COMMAND +
+ [_exit_scenarios.IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL],
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ interrupt_and_wait(process)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
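
interrupt_and_wait above is the core mechanism of these tests: start a child process, give it INIT_TIME to come up, deliver SIGINT, then join it. A self-contained sketch of that mechanism, assuming a POSIX platform and using a throwaway inline child script that is not part of this patch:

    import os
    import signal
    import subprocess
    import sys
    import time

    # Hypothetical child process that sleeps until it is interrupted.
    CHILD_SCRIPT = 'import time; time.sleep(3600)'

    process = subprocess.Popen([sys.executable, '-c', CHILD_SCRIPT])
    time.sleep(1.0)  # analogous to INIT_TIME: let the child start up
    os.kill(process.pid, signal.SIGINT)  # POSIX-only signal delivery
    print('child exited with code', process.wait())
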
diff --git a/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py b/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py
index 2dc225de29..1b1b1bd598 100644
--- a/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py
+++ b/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Test of RPCs made against gRPC Python's application-layer API."""
import unittest
@@ -47,129 +46,131 @@ _STREAM_STREAM = '/test/StreamStream'
def _unary_unary_multi_callable(channel):
- return channel.unary_unary(_UNARY_UNARY)
+ return channel.unary_unary(_UNARY_UNARY)
def _unary_stream_multi_callable(channel):
- return channel.unary_stream(
- _UNARY_STREAM,
- request_serializer=_SERIALIZE_REQUEST,
- response_deserializer=_DESERIALIZE_RESPONSE)
+ return channel.unary_stream(
+ _UNARY_STREAM,
+ request_serializer=_SERIALIZE_REQUEST,
+ response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_unary_multi_callable(channel):
- return channel.stream_unary(
- _STREAM_UNARY,
- request_serializer=_SERIALIZE_REQUEST,
- response_deserializer=_DESERIALIZE_RESPONSE)
+ return channel.stream_unary(
+ _STREAM_UNARY,
+ request_serializer=_SERIALIZE_REQUEST,
+ response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_stream_multi_callable(channel):
- return channel.stream_stream(_STREAM_STREAM)
+ return channel.stream_stream(_STREAM_STREAM)
class InvalidMetadataTest(unittest.TestCase):
- def setUp(self):
- self._channel = grpc.insecure_channel('localhost:8080')
- self._unary_unary = _unary_unary_multi_callable(self._channel)
- self._unary_stream = _unary_stream_multi_callable(self._channel)
- self._stream_unary = _stream_unary_multi_callable(self._channel)
- self._stream_stream = _stream_stream_multi_callable(self._channel)
-
- def testUnaryRequestBlockingUnaryResponse(self):
- request = b'\x07\x08'
- metadata = (('InVaLiD', 'UnaryRequestBlockingUnaryResponse'),)
- expected_error_details = "metadata was invalid: %s" % metadata
- with self.assertRaises(ValueError) as exception_context:
- self._unary_unary(request, metadata=metadata)
- self.assertIn(expected_error_details, str(exception_context.exception))
-
- def testUnaryRequestBlockingUnaryResponseWithCall(self):
- request = b'\x07\x08'
- metadata = (('InVaLiD', 'UnaryRequestBlockingUnaryResponseWithCall'),)
- expected_error_details = "metadata was invalid: %s" % metadata
- with self.assertRaises(ValueError) as exception_context:
- self._unary_unary.with_call(request, metadata=metadata)
- self.assertIn(expected_error_details, str(exception_context.exception))
-
- def testUnaryRequestFutureUnaryResponse(self):
- request = b'\x07\x08'
- metadata = (('InVaLiD', 'UnaryRequestFutureUnaryResponse'),)
- expected_error_details = "metadata was invalid: %s" % metadata
- response_future = self._unary_unary.future(request, metadata=metadata)
- with self.assertRaises(grpc.RpcError) as exception_context:
- response_future.result()
- self.assertEqual(
- exception_context.exception.details(), expected_error_details)
- self.assertEqual(
- exception_context.exception.code(), grpc.StatusCode.INTERNAL)
- self.assertEqual(response_future.details(), expected_error_details)
- self.assertEqual(response_future.code(), grpc.StatusCode.INTERNAL)
-
- def testUnaryRequestStreamResponse(self):
- request = b'\x37\x58'
- metadata = (('InVaLiD', 'UnaryRequestStreamResponse'),)
- expected_error_details = "metadata was invalid: %s" % metadata
- response_iterator = self._unary_stream(request, metadata=metadata)
- with self.assertRaises(grpc.RpcError) as exception_context:
- next(response_iterator)
- self.assertEqual(
- exception_context.exception.details(), expected_error_details)
- self.assertEqual(
- exception_context.exception.code(), grpc.StatusCode.INTERNAL)
- self.assertEqual(response_iterator.details(), expected_error_details)
- self.assertEqual(response_iterator.code(), grpc.StatusCode.INTERNAL)
-
- def testStreamRequestBlockingUnaryResponse(self):
- request_iterator = (b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
- metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponse'),)
- expected_error_details = "metadata was invalid: %s" % metadata
- with self.assertRaises(ValueError) as exception_context:
- self._stream_unary(request_iterator, metadata=metadata)
- self.assertIn(expected_error_details, str(exception_context.exception))
-
- def testStreamRequestBlockingUnaryResponseWithCall(self):
- request_iterator = (
- b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
- metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponseWithCall'),)
- expected_error_details = "metadata was invalid: %s" % metadata
- multi_callable = _stream_unary_multi_callable(self._channel)
- with self.assertRaises(ValueError) as exception_context:
- multi_callable.with_call(request_iterator, metadata=metadata)
- self.assertIn(expected_error_details, str(exception_context.exception))
-
- def testStreamRequestFutureUnaryResponse(self):
- request_iterator = (
- b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
- metadata = (('InVaLiD', 'StreamRequestFutureUnaryResponse'),)
- expected_error_details = "metadata was invalid: %s" % metadata
- response_future = self._stream_unary.future(
- request_iterator, metadata=metadata)
- with self.assertRaises(grpc.RpcError) as exception_context:
- response_future.result()
- self.assertEqual(
- exception_context.exception.details(), expected_error_details)
- self.assertEqual(
- exception_context.exception.code(), grpc.StatusCode.INTERNAL)
- self.assertEqual(response_future.details(), expected_error_details)
- self.assertEqual(response_future.code(), grpc.StatusCode.INTERNAL)
-
- def testStreamRequestStreamResponse(self):
- request_iterator = (
- b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
- metadata = (('InVaLiD', 'StreamRequestStreamResponse'),)
- expected_error_details = "metadata was invalid: %s" % metadata
- response_iterator = self._stream_stream(request_iterator, metadata=metadata)
- with self.assertRaises(grpc.RpcError) as exception_context:
- next(response_iterator)
- self.assertEqual(
- exception_context.exception.details(), expected_error_details)
- self.assertEqual(
- exception_context.exception.code(), grpc.StatusCode.INTERNAL)
- self.assertEqual(response_iterator.details(), expected_error_details)
- self.assertEqual(response_iterator.code(), grpc.StatusCode.INTERNAL)
+ def setUp(self):
+ self._channel = grpc.insecure_channel('localhost:8080')
+ self._unary_unary = _unary_unary_multi_callable(self._channel)
+ self._unary_stream = _unary_stream_multi_callable(self._channel)
+ self._stream_unary = _stream_unary_multi_callable(self._channel)
+ self._stream_stream = _stream_stream_multi_callable(self._channel)
+
+ def testUnaryRequestBlockingUnaryResponse(self):
+ request = b'\x07\x08'
+ metadata = (('InVaLiD', 'UnaryRequestBlockingUnaryResponse'),)
+ expected_error_details = "metadata was invalid: %s" % metadata
+ with self.assertRaises(ValueError) as exception_context:
+ self._unary_unary(request, metadata=metadata)
+ self.assertIn(expected_error_details, str(exception_context.exception))
+
+ def testUnaryRequestBlockingUnaryResponseWithCall(self):
+ request = b'\x07\x08'
+ metadata = (('InVaLiD', 'UnaryRequestBlockingUnaryResponseWithCall'),)
+ expected_error_details = "metadata was invalid: %s" % metadata
+ with self.assertRaises(ValueError) as exception_context:
+ self._unary_unary.with_call(request, metadata=metadata)
+ self.assertIn(expected_error_details, str(exception_context.exception))
+
+ def testUnaryRequestFutureUnaryResponse(self):
+ request = b'\x07\x08'
+ metadata = (('InVaLiD', 'UnaryRequestFutureUnaryResponse'),)
+ expected_error_details = "metadata was invalid: %s" % metadata
+ response_future = self._unary_unary.future(request, metadata=metadata)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertEqual(exception_context.exception.details(),
+ expected_error_details)
+ self.assertEqual(exception_context.exception.code(),
+ grpc.StatusCode.INTERNAL)
+ self.assertEqual(response_future.details(), expected_error_details)
+ self.assertEqual(response_future.code(), grpc.StatusCode.INTERNAL)
+
+ def testUnaryRequestStreamResponse(self):
+ request = b'\x37\x58'
+ metadata = (('InVaLiD', 'UnaryRequestStreamResponse'),)
+ expected_error_details = "metadata was invalid: %s" % metadata
+ response_iterator = self._unary_stream(request, metadata=metadata)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(response_iterator)
+ self.assertEqual(exception_context.exception.details(),
+ expected_error_details)
+ self.assertEqual(exception_context.exception.code(),
+ grpc.StatusCode.INTERNAL)
+ self.assertEqual(response_iterator.details(), expected_error_details)
+ self.assertEqual(response_iterator.code(), grpc.StatusCode.INTERNAL)
+
+ def testStreamRequestBlockingUnaryResponse(self):
+ request_iterator = (b'\x07\x08'
+ for _ in range(test_constants.STREAM_LENGTH))
+ metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponse'),)
+ expected_error_details = "metadata was invalid: %s" % metadata
+ with self.assertRaises(ValueError) as exception_context:
+ self._stream_unary(request_iterator, metadata=metadata)
+ self.assertIn(expected_error_details, str(exception_context.exception))
+
+ def testStreamRequestBlockingUnaryResponseWithCall(self):
+ request_iterator = (b'\x07\x08'
+ for _ in range(test_constants.STREAM_LENGTH))
+ metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponseWithCall'),)
+ expected_error_details = "metadata was invalid: %s" % metadata
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ with self.assertRaises(ValueError) as exception_context:
+ multi_callable.with_call(request_iterator, metadata=metadata)
+ self.assertIn(expected_error_details, str(exception_context.exception))
+
+ def testStreamRequestFutureUnaryResponse(self):
+ request_iterator = (b'\x07\x08'
+ for _ in range(test_constants.STREAM_LENGTH))
+ metadata = (('InVaLiD', 'StreamRequestFutureUnaryResponse'),)
+ expected_error_details = "metadata was invalid: %s" % metadata
+ response_future = self._stream_unary.future(
+ request_iterator, metadata=metadata)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertEqual(exception_context.exception.details(),
+ expected_error_details)
+ self.assertEqual(exception_context.exception.code(),
+ grpc.StatusCode.INTERNAL)
+ self.assertEqual(response_future.details(), expected_error_details)
+ self.assertEqual(response_future.code(), grpc.StatusCode.INTERNAL)
+
+ def testStreamRequestStreamResponse(self):
+ request_iterator = (b'\x07\x08'
+ for _ in range(test_constants.STREAM_LENGTH))
+ metadata = (('InVaLiD', 'StreamRequestStreamResponse'),)
+ expected_error_details = "metadata was invalid: %s" % metadata
+ response_iterator = self._stream_stream(
+ request_iterator, metadata=metadata)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(response_iterator)
+ self.assertEqual(exception_context.exception.details(),
+ expected_error_details)
+ self.assertEqual(exception_context.exception.code(),
+ grpc.StatusCode.INTERNAL)
+ self.assertEqual(response_iterator.details(), expected_error_details)
+ self.assertEqual(response_iterator.code(), grpc.StatusCode.INTERNAL)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
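
These tests rely on gRPC Python validating metadata client-side: header keys must be lowercase, so a mixed-case key such as 'InVaLiD' is rejected with ValueError before any server is contacted. A minimal sketch mirroring the blocking unary case above (the target address and method name are illustrative assumptions):

    import grpc

    channel = grpc.insecure_channel('localhost:8080')  # no server required
    multi_callable = channel.unary_unary('/test/UnaryUnary')
    try:
        multi_callable(b'\x07\x08', metadata=(('InVaLiD', 'value'),))
    except ValueError as error:
        print('rejected:', error)
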
diff --git a/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py b/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py
index 4312679bb9..efeb237874 100644
--- a/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py
+++ b/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py
@@ -50,106 +50,117 @@ _STREAM_STREAM = '/test/StreamStream'
class _Callback(object):
- def __init__(self):
- self._condition = threading.Condition()
- self._value = None
- self._called = False
- def __call__(self, value):
- with self._condition:
- self._value = value
- self._called = True
- self._condition.notify_all()
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._value = None
+ self._called = False
- def value(self):
- with self._condition:
- while not self._called:
- self._condition.wait()
- return self._value
+ def __call__(self, value):
+ with self._condition:
+ self._value = value
+ self._called = True
+ self._condition.notify_all()
+
+ def value(self):
+ with self._condition:
+ while not self._called:
+ self._condition.wait()
+ return self._value
class _Handler(object):
- def __init__(self, control):
- self._control = control
-
- def handle_unary_unary(self, request, servicer_context):
- self._control.control()
- if servicer_context is not None:
- servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
- return request
-
- def handle_unary_stream(self, request, servicer_context):
- for _ in range(test_constants.STREAM_LENGTH):
- self._control.control()
- yield request
- self._control.control()
- if servicer_context is not None:
- servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
-
- def handle_stream_unary(self, request_iterator, servicer_context):
- if servicer_context is not None:
- servicer_context.invocation_metadata()
- self._control.control()
- response_elements = []
- for request in request_iterator:
- self._control.control()
- response_elements.append(request)
- self._control.control()
- if servicer_context is not None:
- servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
- return b''.join(response_elements)
-
- def handle_stream_stream(self, request_iterator, servicer_context):
- self._control.control()
- if servicer_context is not None:
- servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
- for request in request_iterator:
- self._control.control()
- yield request
- self._control.control()
+
+ def __init__(self, control):
+ self._control = control
+
+ def handle_unary_unary(self, request, servicer_context):
+ self._control.control()
+ if servicer_context is not None:
+ servicer_context.set_trailing_metadata(((
+ 'testkey',
+ 'testvalue',),))
+ return request
+
+ def handle_unary_stream(self, request, servicer_context):
+ for _ in range(test_constants.STREAM_LENGTH):
+ self._control.control()
+ yield request
+ self._control.control()
+ if servicer_context is not None:
+ servicer_context.set_trailing_metadata(((
+ 'testkey',
+ 'testvalue',),))
+
+ def handle_stream_unary(self, request_iterator, servicer_context):
+ if servicer_context is not None:
+ servicer_context.invocation_metadata()
+ self._control.control()
+ response_elements = []
+ for request in request_iterator:
+ self._control.control()
+ response_elements.append(request)
+ self._control.control()
+ if servicer_context is not None:
+ servicer_context.set_trailing_metadata(((
+ 'testkey',
+ 'testvalue',),))
+ return b''.join(response_elements)
+
+ def handle_stream_stream(self, request_iterator, servicer_context):
+ self._control.control()
+ if servicer_context is not None:
+ servicer_context.set_trailing_metadata(((
+ 'testkey',
+ 'testvalue',),))
+ for request in request_iterator:
+ self._control.control()
+ yield request
+ self._control.control()
class _MethodHandler(grpc.RpcMethodHandler):
- def __init__(
- self, request_streaming, response_streaming, request_deserializer,
- response_serializer, unary_unary, unary_stream, stream_unary,
- stream_stream):
- self.request_streaming = request_streaming
- self.response_streaming = response_streaming
- self.request_deserializer = request_deserializer
- self.response_serializer = response_serializer
- self.unary_unary = unary_unary
- self.unary_stream = unary_stream
- self.stream_unary = stream_unary
- self.stream_stream = stream_stream
+
+ def __init__(self, request_streaming, response_streaming,
+ request_deserializer, response_serializer, unary_unary,
+ unary_stream, stream_unary, stream_stream):
+ self.request_streaming = request_streaming
+ self.response_streaming = response_streaming
+ self.request_deserializer = request_deserializer
+ self.response_serializer = response_serializer
+ self.unary_unary = unary_unary
+ self.unary_stream = unary_stream
+ self.stream_unary = stream_unary
+ self.stream_stream = stream_stream
class _GenericHandler(grpc.GenericRpcHandler):
- def __init__(self, handler):
- self._handler = handler
-
- def service(self, handler_call_details):
- if handler_call_details.method == _UNARY_UNARY:
- return _MethodHandler(
- False, False, None, None, self._handler.handle_unary_unary, None,
- None, None)
- elif handler_call_details.method == _UNARY_STREAM:
- return _MethodHandler(
- False, True, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None,
- self._handler.handle_unary_stream, None, None)
- elif handler_call_details.method == _STREAM_UNARY:
- return _MethodHandler(
- True, False, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None, None,
- self._handler.handle_stream_unary, None)
- elif handler_call_details.method == _STREAM_STREAM:
- return _MethodHandler(
- True, True, None, None, None, None, None,
- self._handler.handle_stream_stream)
- else:
- return None
+
+ def __init__(self, handler):
+ self._handler = handler
+
+ def service(self, handler_call_details):
+ if handler_call_details.method == _UNARY_UNARY:
+ return _MethodHandler(False, False, None, None,
+ self._handler.handle_unary_unary, None, None,
+ None)
+ elif handler_call_details.method == _UNARY_STREAM:
+ return _MethodHandler(False, True, _DESERIALIZE_REQUEST,
+ _SERIALIZE_RESPONSE, None,
+ self._handler.handle_unary_stream, None, None)
+ elif handler_call_details.method == _STREAM_UNARY:
+ return _MethodHandler(True, False, _DESERIALIZE_REQUEST,
+ _SERIALIZE_RESPONSE, None, None,
+ self._handler.handle_stream_unary, None)
+ elif handler_call_details.method == _STREAM_STREAM:
+ return _MethodHandler(True, True, None, None, None, None, None,
+ self._handler.handle_stream_stream)
+ else:
+ return None
class FailAfterFewIterationsCounter(object):
+
def __init__(self, high, bytestring):
self._current = 0
self._high = high
@@ -167,81 +178,82 @@ class FailAfterFewIterationsCounter(object):
def _unary_unary_multi_callable(channel):
- return channel.unary_unary(_UNARY_UNARY)
+ return channel.unary_unary(_UNARY_UNARY)
def _unary_stream_multi_callable(channel):
- return channel.unary_stream(
- _UNARY_STREAM,
- request_serializer=_SERIALIZE_REQUEST,
- response_deserializer=_DESERIALIZE_RESPONSE)
+ return channel.unary_stream(
+ _UNARY_STREAM,
+ request_serializer=_SERIALIZE_REQUEST,
+ response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_unary_multi_callable(channel):
- return channel.stream_unary(
- _STREAM_UNARY,
- request_serializer=_SERIALIZE_REQUEST,
- response_deserializer=_DESERIALIZE_RESPONSE)
+ return channel.stream_unary(
+ _STREAM_UNARY,
+ request_serializer=_SERIALIZE_REQUEST,
+ response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_stream_multi_callable(channel):
- return channel.stream_stream(_STREAM_STREAM)
+ return channel.stream_stream(_STREAM_STREAM)
class InvocationDefectsTest(unittest.TestCase):
- def setUp(self):
- self._control = test_control.PauseFailControl()
- self._handler = _Handler(self._control)
- self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-
- self._server = grpc.server(self._server_pool)
- port = self._server.add_insecure_port('[::]:0')
- self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
- self._server.start()
-
- self._channel = grpc.insecure_channel('localhost:%d' % port)
-
- def tearDown(self):
- self._server.stop(0)
-
- def testIterableStreamRequestBlockingUnaryResponse(self):
- requests = [b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)]
- multi_callable = _stream_unary_multi_callable(self._channel)
-
- with self.assertRaises(grpc.RpcError):
- response = multi_callable(
- requests,
- metadata=(('test', 'IterableStreamRequestBlockingUnaryResponse'),))
-
- def testIterableStreamRequestFutureUnaryResponse(self):
- requests = [b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)]
- multi_callable = _stream_unary_multi_callable(self._channel)
- response_future = multi_callable.future(
- requests,
- metadata=(
- ('test', 'IterableStreamRequestFutureUnaryResponse'),))
-
- with self.assertRaises(grpc.RpcError):
- response = response_future.result()
-
- def testIterableStreamRequestStreamResponse(self):
- requests = [b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH)]
- multi_callable = _stream_stream_multi_callable(self._channel)
- response_iterator = multi_callable(
- requests,
- metadata=(('test', 'IterableStreamRequestStreamResponse'),))
-
- with self.assertRaises(grpc.RpcError):
- next(response_iterator)
-
- def testIteratorStreamRequestStreamResponse(self):
- requests_iterator = FailAfterFewIterationsCounter(
- test_constants.STREAM_LENGTH // 2, b'\x07\x08')
- multi_callable = _stream_stream_multi_callable(self._channel)
- response_iterator = multi_callable(
- requests_iterator,
- metadata=(('test', 'IteratorStreamRequestStreamResponse'),))
-
- with self.assertRaises(grpc.RpcError):
- for _ in range(test_constants.STREAM_LENGTH // 2 + 1):
- next(response_iterator)
+
+ def setUp(self):
+ self._control = test_control.PauseFailControl()
+ self._handler = _Handler(self._control)
+ self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+
+ self._server = grpc.server(self._server_pool)
+ port = self._server.add_insecure_port('[::]:0')
+ self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
+ self._server.start()
+
+ self._channel = grpc.insecure_channel('localhost:%d' % port)
+
+ def tearDown(self):
+ self._server.stop(0)
+
+ def testIterableStreamRequestBlockingUnaryResponse(self):
+ requests = [b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)]
+ multi_callable = _stream_unary_multi_callable(self._channel)
+
+ with self.assertRaises(grpc.RpcError):
+ response = multi_callable(
+ requests,
+ metadata=(
+ ('test', 'IterableStreamRequestBlockingUnaryResponse'),))
+
+ def testIterableStreamRequestFutureUnaryResponse(self):
+ requests = [b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)]
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ response_future = multi_callable.future(
+ requests,
+ metadata=(('test', 'IterableStreamRequestFutureUnaryResponse'),))
+
+ with self.assertRaises(grpc.RpcError):
+ response = response_future.result()
+
+ def testIterableStreamRequestStreamResponse(self):
+ requests = [b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH)]
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ response_iterator = multi_callable(
+ requests,
+ metadata=(('test', 'IterableStreamRequestStreamResponse'),))
+
+ with self.assertRaises(grpc.RpcError):
+ next(response_iterator)
+
+ def testIteratorStreamRequestStreamResponse(self):
+ requests_iterator = FailAfterFewIterationsCounter(
+ test_constants.STREAM_LENGTH // 2, b'\x07\x08')
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ response_iterator = multi_callable(
+ requests_iterator,
+ metadata=(('test', 'IteratorStreamRequestStreamResponse'),))
+
+ with self.assertRaises(grpc.RpcError):
+ for _ in range(test_constants.STREAM_LENGTH // 2 + 1):
+ next(response_iterator)
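The hunks above reformat a test built around grpc.GenericRpcHandler dispatch: the server hands each incoming method name to service(), and whichever RpcMethodHandler comes back (or None to decline) decides how the RPC is served. As a reading aid only, here is a minimal sketch of that dispatch pattern outside the test harness; the '/demo/Echo' method path and the identity byte handling are assumptions, not taken from this patch.

import grpc
from concurrent import futures

_ECHO = '/demo/Echo'  # hypothetical method path, not from the patch


class _EchoHandler(grpc.GenericRpcHandler):

    def service(self, handler_call_details):
        # Claim the RPC by returning an RpcMethodHandler, or decline with None
        # so any other registered generic handler may try the method.
        if handler_call_details.method == _ECHO:
            # No serializers supplied, so requests and responses stay raw bytes.
            return grpc.unary_unary_rpc_method_handler(
                lambda request, context: request)
        return None


server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
server.add_generic_rpc_handlers((_EchoHandler(),))
port = server.add_insecure_port('[::]:0')
server.start()

channel = grpc.insecure_channel('localhost:%d' % port)
assert channel.unary_unary(_ECHO)(b'\x07\x08') == b'\x07\x08'
server.stop(0)

The positional _MethodHandler construction in the test above follows the same shape; only the streaming flags and which one of the four behavior slots is non-None change from method to method.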
diff --git a/src/python/grpcio_tests/tests/unit/_junkdrawer/__init__.py b/src/python/grpcio_tests/tests/unit/_junkdrawer/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/unit/_junkdrawer/__init__.py
+++ b/src/python/grpcio_tests/tests/unit/_junkdrawer/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/_junkdrawer/stock_pb2.py b/src/python/grpcio_tests/tests/unit/_junkdrawer/stock_pb2.py
index eef18f82d6..70f437bc83 100644
--- a/src/python/grpcio_tests/tests/unit/_junkdrawer/stock_pb2.py
+++ b/src/python/grpcio_tests/tests/unit/_junkdrawer/stock_pb2.py
@@ -35,7 +35,7 @@
# source: stock.proto
import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
@@ -45,108 +45,135 @@ from google.protobuf import descriptor_pb2
_sym_db = _symbol_database.Default()
-
-
-
DESCRIPTOR = _descriptor.FileDescriptor(
- name='stock.proto',
- package='stock',
- serialized_pb=_b('\n\x0bstock.proto\x12\x05stock\">\n\x0cStockRequest\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x1e\n\x13num_trades_to_watch\x18\x02 \x01(\x05:\x01\x30\"+\n\nStockReply\x12\r\n\x05price\x18\x01 \x01(\x02\x12\x0e\n\x06symbol\x18\x02 \x01(\t2\x96\x02\n\x05Stock\x12=\n\x11GetLastTradePrice\x12\x13.stock.StockRequest\x1a\x11.stock.StockReply\"\x00\x12I\n\x19GetLastTradePriceMultiple\x12\x13.stock.StockRequest\x1a\x11.stock.StockReply\"\x00(\x01\x30\x01\x12?\n\x11WatchFutureTrades\x12\x13.stock.StockRequest\x1a\x11.stock.StockReply\"\x00\x30\x01\x12\x42\n\x14GetHighestTradePrice\x12\x13.stock.StockRequest\x1a\x11.stock.StockReply\"\x00(\x01')
-)
+ name='stock.proto',
+ package='stock',
+ serialized_pb=_b(
+ '\n\x0bstock.proto\x12\x05stock\">\n\x0cStockRequest\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x1e\n\x13num_trades_to_watch\x18\x02 \x01(\x05:\x01\x30\"+\n\nStockReply\x12\r\n\x05price\x18\x01 \x01(\x02\x12\x0e\n\x06symbol\x18\x02 \x01(\t2\x96\x02\n\x05Stock\x12=\n\x11GetLastTradePrice\x12\x13.stock.StockRequest\x1a\x11.stock.StockReply\"\x00\x12I\n\x19GetLastTradePriceMultiple\x12\x13.stock.StockRequest\x1a\x11.stock.StockReply\"\x00(\x01\x30\x01\x12?\n\x11WatchFutureTrades\x12\x13.stock.StockRequest\x1a\x11.stock.StockReply\"\x00\x30\x01\x12\x42\n\x14GetHighestTradePrice\x12\x13.stock.StockRequest\x1a\x11.stock.StockReply\"\x00(\x01'
+ ))
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-
-
_STOCKREQUEST = _descriptor.Descriptor(
- name='StockRequest',
- full_name='stock.StockRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='symbol', full_name='stock.StockRequest.symbol', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='num_trades_to_watch', full_name='stock.StockRequest.num_trades_to_watch', index=1,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=True, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=22,
- serialized_end=84,
-)
-
+ name='StockRequest',
+ full_name='stock.StockRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='symbol',
+ full_name='stock.StockRequest.symbol',
+ index=0,
+ number=1,
+ type=9,
+ cpp_type=9,
+ label=1,
+ has_default_value=False,
+ default_value=_b("").decode('utf-8'),
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='num_trades_to_watch',
+ full_name='stock.StockRequest.num_trades_to_watch',
+ index=1,
+ number=2,
+ type=5,
+ cpp_type=1,
+ label=1,
+ has_default_value=True,
+ default_value=0,
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ options=None),
+ ],
+ extensions=[],
+ nested_types=[],
+ enum_types=[],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[],
+ serialized_start=22,
+ serialized_end=84,)
_STOCKREPLY = _descriptor.Descriptor(
- name='StockReply',
- full_name='stock.StockReply',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='price', full_name='stock.StockReply.price', index=0,
- number=1, type=2, cpp_type=6, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='symbol', full_name='stock.StockReply.symbol', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=86,
- serialized_end=129,
-)
+ name='StockReply',
+ full_name='stock.StockReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='price',
+ full_name='stock.StockReply.price',
+ index=0,
+ number=1,
+ type=2,
+ cpp_type=6,
+ label=1,
+ has_default_value=False,
+ default_value=0,
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='symbol',
+ full_name='stock.StockReply.symbol',
+ index=1,
+ number=2,
+ type=9,
+ cpp_type=9,
+ label=1,
+ has_default_value=False,
+ default_value=_b("").decode('utf-8'),
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ options=None),
+ ],
+ extensions=[],
+ nested_types=[],
+ enum_types=[],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[],
+ serialized_start=86,
+ serialized_end=129,)
DESCRIPTOR.message_types_by_name['StockRequest'] = _STOCKREQUEST
DESCRIPTOR.message_types_by_name['StockReply'] = _STOCKREPLY
-StockRequest = _reflection.GeneratedProtocolMessageType('StockRequest', (_message.Message,), dict(
- DESCRIPTOR = _STOCKREQUEST,
- __module__ = 'stock_pb2'
- # @@protoc_insertion_point(class_scope:stock.StockRequest)
- ))
+StockRequest = _reflection.GeneratedProtocolMessageType(
+ 'StockRequest',
+ (_message.Message,),
+ dict(
+ DESCRIPTOR=_STOCKREQUEST,
+ __module__='stock_pb2'
+ # @@protoc_insertion_point(class_scope:stock.StockRequest)
+ ))
_sym_db.RegisterMessage(StockRequest)
-StockReply = _reflection.GeneratedProtocolMessageType('StockReply', (_message.Message,), dict(
- DESCRIPTOR = _STOCKREPLY,
- __module__ = 'stock_pb2'
- # @@protoc_insertion_point(class_scope:stock.StockReply)
- ))
+StockReply = _reflection.GeneratedProtocolMessageType(
+ 'StockReply',
+ (_message.Message,),
+ dict(
+ DESCRIPTOR=_STOCKREPLY,
+ __module__='stock_pb2'
+ # @@protoc_insertion_point(class_scope:stock.StockReply)
+ ))
_sym_db.RegisterMessage(StockReply)
-
# @@protoc_insertion_point(module_scope)
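The regenerated stock_pb2.py above keeps protoc's _b helper, which returns the serialized descriptor string unchanged on Python 2 and latin-1 encodes it back to bytes on Python 3. A tiny illustration of that behavior, using only a made-up prefix of the descriptor string:

import sys

_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))

# On Python 3 the escaped literal is a str, and latin-1 maps each code point
# back to the original descriptor byte one-to-one.
if sys.version_info[0] >= 3:
    assert _b('\x0bstock.proto\x12\x05stock') == b'\x0bstock.proto\x12\x05stock'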
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
index fb3e547781..af2ce64dce 100644
--- a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests application-provided metadata, status code, and details."""
import threading
@@ -53,20 +52,16 @@ _UNARY_STREAM = 'UnaryStream'
_STREAM_UNARY = 'StreamUnary'
_STREAM_STREAM = 'StreamStream'
-_CLIENT_METADATA = (
- ('client-md-key', 'client-md-key'),
- ('client-md-key-bin', b'\x00\x01')
-)
+_CLIENT_METADATA = (('client-md-key', 'client-md-key'),
+ ('client-md-key-bin', b'\x00\x01'))
_SERVER_INITIAL_METADATA = (
('server-initial-md-key', 'server-initial-md-value'),
- ('server-initial-md-key-bin', b'\x00\x02')
-)
+ ('server-initial-md-key-bin', b'\x00\x02'))
_SERVER_TRAILING_METADATA = (
('server-trailing-md-key', 'server-trailing-md-value'),
- ('server-trailing-md-key-bin', b'\x00\x03')
-)
+ ('server-trailing-md-key-bin', b'\x00\x03'))
_NON_OK_CODE = grpc.StatusCode.NOT_FOUND
_DETAILS = 'Test details!'
@@ -74,450 +69,464 @@ _DETAILS = 'Test details!'
class _Servicer(object):
- def __init__(self):
- self._lock = threading.Lock()
- self._code = None
- self._details = None
- self._exception = False
- self._return_none = False
- self._received_client_metadata = None
-
- def unary_unary(self, request, context):
- with self._lock:
- self._received_client_metadata = context.invocation_metadata()
- context.send_initial_metadata(_SERVER_INITIAL_METADATA)
- context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
- if self._code is not None:
- context.set_code(self._code)
- if self._details is not None:
- context.set_details(self._details)
- if self._exception:
- raise test_control.Defect()
- else:
- return None if self._return_none else object()
-
- def unary_stream(self, request, context):
- with self._lock:
- self._received_client_metadata = context.invocation_metadata()
- context.send_initial_metadata(_SERVER_INITIAL_METADATA)
- context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
- if self._code is not None:
- context.set_code(self._code)
- if self._details is not None:
- context.set_details(self._details)
- for _ in range(test_constants.STREAM_LENGTH // 2):
- yield _SERIALIZED_RESPONSE
- if self._exception:
- raise test_control.Defect()
-
- def stream_unary(self, request_iterator, context):
- with self._lock:
- self._received_client_metadata = context.invocation_metadata()
- context.send_initial_metadata(_SERVER_INITIAL_METADATA)
- context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
- if self._code is not None:
- context.set_code(self._code)
- if self._details is not None:
- context.set_details(self._details)
- # TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
- # request iterator.
- for ignored_request in request_iterator:
- pass
- if self._exception:
- raise test_control.Defect()
- else:
- return None if self._return_none else _SERIALIZED_RESPONSE
-
- def stream_stream(self, request_iterator, context):
- with self._lock:
- self._received_client_metadata = context.invocation_metadata()
- context.send_initial_metadata(_SERVER_INITIAL_METADATA)
- context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
- if self._code is not None:
- context.set_code(self._code)
- if self._details is not None:
- context.set_details(self._details)
- # TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
- # request iterator.
- for ignored_request in request_iterator:
- pass
- for _ in range(test_constants.STREAM_LENGTH // 3):
- yield object()
- if self._exception:
- raise test_control.Defect()
-
- def set_code(self, code):
- with self._lock:
- self._code = code
-
- def set_details(self, details):
- with self._lock:
- self._details = details
-
- def set_exception(self):
- with self._lock:
- self._exception = True
-
- def set_return_none(self):
- with self._lock:
- self._return_none = True
-
- def received_client_metadata(self):
- with self._lock:
- return self._received_client_metadata
+ def __init__(self):
+ self._lock = threading.Lock()
+ self._code = None
+ self._details = None
+ self._exception = False
+ self._return_none = False
+ self._received_client_metadata = None
+
+ def unary_unary(self, request, context):
+ with self._lock:
+ self._received_client_metadata = context.invocation_metadata()
+ context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+ context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+ if self._code is not None:
+ context.set_code(self._code)
+ if self._details is not None:
+ context.set_details(self._details)
+ if self._exception:
+ raise test_control.Defect()
+ else:
+ return None if self._return_none else object()
+
+ def unary_stream(self, request, context):
+ with self._lock:
+ self._received_client_metadata = context.invocation_metadata()
+ context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+ context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+ if self._code is not None:
+ context.set_code(self._code)
+ if self._details is not None:
+ context.set_details(self._details)
+ for _ in range(test_constants.STREAM_LENGTH // 2):
+ yield _SERIALIZED_RESPONSE
+ if self._exception:
+ raise test_control.Defect()
+
+ def stream_unary(self, request_iterator, context):
+ with self._lock:
+ self._received_client_metadata = context.invocation_metadata()
+ context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+ context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+ if self._code is not None:
+ context.set_code(self._code)
+ if self._details is not None:
+ context.set_details(self._details)
+ # TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
+ # request iterator.
+ for ignored_request in request_iterator:
+ pass
+ if self._exception:
+ raise test_control.Defect()
+ else:
+ return None if self._return_none else _SERIALIZED_RESPONSE
+
+ def stream_stream(self, request_iterator, context):
+ with self._lock:
+ self._received_client_metadata = context.invocation_metadata()
+ context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+ context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+ if self._code is not None:
+ context.set_code(self._code)
+ if self._details is not None:
+ context.set_details(self._details)
+ # TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
+ # request iterator.
+ for ignored_request in request_iterator:
+ pass
+ for _ in range(test_constants.STREAM_LENGTH // 3):
+ yield object()
+ if self._exception:
+ raise test_control.Defect()
+
+ def set_code(self, code):
+ with self._lock:
+ self._code = code
+
+ def set_details(self, details):
+ with self._lock:
+ self._details = details
+
+ def set_exception(self):
+ with self._lock:
+ self._exception = True
+
+ def set_return_none(self):
+ with self._lock:
+ self._return_none = True
+
+ def received_client_metadata(self):
+ with self._lock:
+ return self._received_client_metadata
def _generic_handler(servicer):
- method_handlers = {
- _UNARY_UNARY: grpc.unary_unary_rpc_method_handler(
- servicer.unary_unary, request_deserializer=_REQUEST_DESERIALIZER,
- response_serializer=_RESPONSE_SERIALIZER),
- _UNARY_STREAM: grpc.unary_stream_rpc_method_handler(
- servicer.unary_stream),
- _STREAM_UNARY: grpc.stream_unary_rpc_method_handler(
- servicer.stream_unary),
- _STREAM_STREAM: grpc.stream_stream_rpc_method_handler(
- servicer.stream_stream, request_deserializer=_REQUEST_DESERIALIZER,
- response_serializer=_RESPONSE_SERIALIZER),
- }
- return grpc.method_handlers_generic_handler(_SERVICE, method_handlers)
+ method_handlers = {
+ _UNARY_UNARY: grpc.unary_unary_rpc_method_handler(
+ servicer.unary_unary,
+ request_deserializer=_REQUEST_DESERIALIZER,
+ response_serializer=_RESPONSE_SERIALIZER),
+ _UNARY_STREAM:
+ grpc.unary_stream_rpc_method_handler(servicer.unary_stream),
+ _STREAM_UNARY:
+ grpc.stream_unary_rpc_method_handler(servicer.stream_unary),
+ _STREAM_STREAM: grpc.stream_stream_rpc_method_handler(
+ servicer.stream_stream,
+ request_deserializer=_REQUEST_DESERIALIZER,
+ response_serializer=_RESPONSE_SERIALIZER),
+ }
+ return grpc.method_handlers_generic_handler(_SERVICE, method_handlers)
class MetadataCodeDetailsTest(unittest.TestCase):
- def setUp(self):
- self._servicer = _Servicer()
- self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- self._server = grpc.server(
- self._server_pool, handlers=(_generic_handler(self._servicer),))
- port = self._server.add_insecure_port('[::]:0')
- self._server.start()
-
- channel = grpc.insecure_channel('localhost:{}'.format(port))
- self._unary_unary = channel.unary_unary(
- '/'.join(('', _SERVICE, _UNARY_UNARY,)),
- request_serializer=_REQUEST_SERIALIZER,
- response_deserializer=_RESPONSE_DESERIALIZER,)
- self._unary_stream = channel.unary_stream(
- '/'.join(('', _SERVICE, _UNARY_STREAM,)),)
- self._stream_unary = channel.stream_unary(
- '/'.join(('', _SERVICE, _STREAM_UNARY,)),)
- self._stream_stream = channel.stream_stream(
- '/'.join(('', _SERVICE, _STREAM_STREAM,)),
- request_serializer=_REQUEST_SERIALIZER,
- response_deserializer=_RESPONSE_DESERIALIZER,)
-
-
- def testSuccessfulUnaryUnary(self):
- self._servicer.set_details(_DETAILS)
-
- unused_response, call = self._unary_unary.with_call(
- object(), metadata=_CLIENT_METADATA)
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA, call.initial_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA, call.trailing_metadata()))
- self.assertIs(grpc.StatusCode.OK, call.code())
- self.assertEqual(_DETAILS, call.details())
-
- def testSuccessfulUnaryStream(self):
- self._servicer.set_details(_DETAILS)
-
- call = self._unary_stream(_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
- received_initial_metadata = call.initial_metadata()
- for _ in call:
- pass
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA, received_initial_metadata))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA, call.trailing_metadata()))
- self.assertIs(grpc.StatusCode.OK, call.code())
- self.assertEqual(_DETAILS, call.details())
-
- def testSuccessfulStreamUnary(self):
- self._servicer.set_details(_DETAILS)
-
- unused_response, call = self._stream_unary.with_call(
- iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
- metadata=_CLIENT_METADATA)
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA, call.initial_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA, call.trailing_metadata()))
- self.assertIs(grpc.StatusCode.OK, call.code())
- self.assertEqual(_DETAILS, call.details())
-
- def testSuccessfulStreamStream(self):
- self._servicer.set_details(_DETAILS)
-
- call = self._stream_stream(
- iter([object()] * test_constants.STREAM_LENGTH),
- metadata=_CLIENT_METADATA)
- received_initial_metadata = call.initial_metadata()
- for _ in call:
- pass
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA, received_initial_metadata))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA, call.trailing_metadata()))
- self.assertIs(grpc.StatusCode.OK, call.code())
- self.assertEqual(_DETAILS, call.details())
-
- def testCustomCodeUnaryUnary(self):
- self._servicer.set_code(_NON_OK_CODE)
- self._servicer.set_details(_DETAILS)
-
- with self.assertRaises(grpc.RpcError) as exception_context:
- self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA,
- exception_context.exception.initial_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA,
- exception_context.exception.trailing_metadata()))
- self.assertIs(_NON_OK_CODE, exception_context.exception.code())
- self.assertEqual(_DETAILS, exception_context.exception.details())
-
- def testCustomCodeUnaryStream(self):
- self._servicer.set_code(_NON_OK_CODE)
- self._servicer.set_details(_DETAILS)
-
- call = self._unary_stream(_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
- received_initial_metadata = call.initial_metadata()
- with self.assertRaises(grpc.RpcError):
- for _ in call:
- pass
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA, received_initial_metadata))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA, call.trailing_metadata()))
- self.assertIs(_NON_OK_CODE, call.code())
- self.assertEqual(_DETAILS, call.details())
-
- def testCustomCodeStreamUnary(self):
- self._servicer.set_code(_NON_OK_CODE)
- self._servicer.set_details(_DETAILS)
-
- with self.assertRaises(grpc.RpcError) as exception_context:
- self._stream_unary.with_call(
- iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
- metadata=_CLIENT_METADATA)
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA,
- exception_context.exception.initial_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA,
- exception_context.exception.trailing_metadata()))
- self.assertIs(_NON_OK_CODE, exception_context.exception.code())
- self.assertEqual(_DETAILS, exception_context.exception.details())
-
- def testCustomCodeStreamStream(self):
- self._servicer.set_code(_NON_OK_CODE)
- self._servicer.set_details(_DETAILS)
-
- call = self._stream_stream(
- iter([object()] * test_constants.STREAM_LENGTH),
- metadata=_CLIENT_METADATA)
- received_initial_metadata = call.initial_metadata()
- with self.assertRaises(grpc.RpcError) as exception_context:
- for _ in call:
- pass
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA, received_initial_metadata))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA,
- exception_context.exception.trailing_metadata()))
- self.assertIs(_NON_OK_CODE, exception_context.exception.code())
- self.assertEqual(_DETAILS, exception_context.exception.details())
-
- def testCustomCodeExceptionUnaryUnary(self):
- self._servicer.set_code(_NON_OK_CODE)
- self._servicer.set_details(_DETAILS)
- self._servicer.set_exception()
-
- with self.assertRaises(grpc.RpcError) as exception_context:
- self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA,
- exception_context.exception.initial_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA,
- exception_context.exception.trailing_metadata()))
- self.assertIs(_NON_OK_CODE, exception_context.exception.code())
- self.assertEqual(_DETAILS, exception_context.exception.details())
-
- def testCustomCodeExceptionUnaryStream(self):
- self._servicer.set_code(_NON_OK_CODE)
- self._servicer.set_details(_DETAILS)
- self._servicer.set_exception()
-
- call = self._unary_stream(_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
- received_initial_metadata = call.initial_metadata()
- with self.assertRaises(grpc.RpcError):
- for _ in call:
- pass
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA, received_initial_metadata))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA, call.trailing_metadata()))
- self.assertIs(_NON_OK_CODE, call.code())
- self.assertEqual(_DETAILS, call.details())
-
- def testCustomCodeExceptionStreamUnary(self):
- self._servicer.set_code(_NON_OK_CODE)
- self._servicer.set_details(_DETAILS)
- self._servicer.set_exception()
-
- with self.assertRaises(grpc.RpcError) as exception_context:
- self._stream_unary.with_call(
- iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
- metadata=_CLIENT_METADATA)
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA,
- exception_context.exception.initial_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA,
- exception_context.exception.trailing_metadata()))
- self.assertIs(_NON_OK_CODE, exception_context.exception.code())
- self.assertEqual(_DETAILS, exception_context.exception.details())
-
- def testCustomCodeExceptionStreamStream(self):
- self._servicer.set_code(_NON_OK_CODE)
- self._servicer.set_details(_DETAILS)
- self._servicer.set_exception()
-
- call = self._stream_stream(
- iter([object()] * test_constants.STREAM_LENGTH),
- metadata=_CLIENT_METADATA)
- received_initial_metadata = call.initial_metadata()
- with self.assertRaises(grpc.RpcError):
- for _ in call:
- pass
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA, received_initial_metadata))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA, call.trailing_metadata()))
- self.assertIs(_NON_OK_CODE, call.code())
- self.assertEqual(_DETAILS, call.details())
-
- def testCustomCodeReturnNoneUnaryUnary(self):
- self._servicer.set_code(_NON_OK_CODE)
- self._servicer.set_details(_DETAILS)
- self._servicer.set_return_none()
-
- with self.assertRaises(grpc.RpcError) as exception_context:
- self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA,
- exception_context.exception.initial_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA,
- exception_context.exception.trailing_metadata()))
- self.assertIs(_NON_OK_CODE, exception_context.exception.code())
- self.assertEqual(_DETAILS, exception_context.exception.details())
-
- def testCustomCodeReturnNoneStreamUnary(self):
- self._servicer.set_code(_NON_OK_CODE)
- self._servicer.set_details(_DETAILS)
- self._servicer.set_return_none()
-
- with self.assertRaises(grpc.RpcError) as exception_context:
- self._stream_unary.with_call(
- iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
- metadata=_CLIENT_METADATA)
-
- self.assertTrue(
- test_common.metadata_transmitted(
- _CLIENT_METADATA, self._servicer.received_client_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA,
- exception_context.exception.initial_metadata()))
- self.assertTrue(
- test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA,
- exception_context.exception.trailing_metadata()))
- self.assertIs(_NON_OK_CODE, exception_context.exception.code())
- self.assertEqual(_DETAILS, exception_context.exception.details())
+ def setUp(self):
+ self._servicer = _Servicer()
+ self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ self._server = grpc.server(
+ self._server_pool, handlers=(_generic_handler(self._servicer),))
+ port = self._server.add_insecure_port('[::]:0')
+ self._server.start()
+
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
+ self._unary_unary = channel.unary_unary(
+ '/'.join((
+ '',
+ _SERVICE,
+ _UNARY_UNARY,)),
+ request_serializer=_REQUEST_SERIALIZER,
+ response_deserializer=_RESPONSE_DESERIALIZER,)
+ self._unary_stream = channel.unary_stream('/'.join((
+ '',
+ _SERVICE,
+ _UNARY_STREAM,)),)
+ self._stream_unary = channel.stream_unary('/'.join((
+ '',
+ _SERVICE,
+ _STREAM_UNARY,)),)
+ self._stream_stream = channel.stream_stream(
+ '/'.join((
+ '',
+ _SERVICE,
+ _STREAM_STREAM,)),
+ request_serializer=_REQUEST_SERIALIZER,
+ response_deserializer=_RESPONSE_DESERIALIZER,)
+
+ def testSuccessfulUnaryUnary(self):
+ self._servicer.set_details(_DETAILS)
+
+ unused_response, call = self._unary_unary.with_call(
+ object(), metadata=_CLIENT_METADATA)
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+ call.initial_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+ call.trailing_metadata()))
+ self.assertIs(grpc.StatusCode.OK, call.code())
+ self.assertEqual(_DETAILS, call.details())
+
+ def testSuccessfulUnaryStream(self):
+ self._servicer.set_details(_DETAILS)
+
+ call = self._unary_stream(
+ _SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
+ received_initial_metadata = call.initial_metadata()
+ for _ in call:
+ pass
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+ received_initial_metadata))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+ call.trailing_metadata()))
+ self.assertIs(grpc.StatusCode.OK, call.code())
+ self.assertEqual(_DETAILS, call.details())
+
+ def testSuccessfulStreamUnary(self):
+ self._servicer.set_details(_DETAILS)
+
+ unused_response, call = self._stream_unary.with_call(
+ iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
+ metadata=_CLIENT_METADATA)
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+ call.initial_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+ call.trailing_metadata()))
+ self.assertIs(grpc.StatusCode.OK, call.code())
+ self.assertEqual(_DETAILS, call.details())
+
+ def testSuccessfulStreamStream(self):
+ self._servicer.set_details(_DETAILS)
+
+ call = self._stream_stream(
+ iter([object()] * test_constants.STREAM_LENGTH),
+ metadata=_CLIENT_METADATA)
+ received_initial_metadata = call.initial_metadata()
+ for _ in call:
+ pass
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+ received_initial_metadata))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+ call.trailing_metadata()))
+ self.assertIs(grpc.StatusCode.OK, call.code())
+ self.assertEqual(_DETAILS, call.details())
+
+ def testCustomCodeUnaryUnary(self):
+ self._servicer.set_code(_NON_OK_CODE)
+ self._servicer.set_details(_DETAILS)
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _SERVER_INITIAL_METADATA,
+ exception_context.exception.initial_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _SERVER_TRAILING_METADATA,
+ exception_context.exception.trailing_metadata()))
+ self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+ self.assertEqual(_DETAILS, exception_context.exception.details())
+
+ def testCustomCodeUnaryStream(self):
+ self._servicer.set_code(_NON_OK_CODE)
+ self._servicer.set_details(_DETAILS)
+
+ call = self._unary_stream(
+ _SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
+ received_initial_metadata = call.initial_metadata()
+ with self.assertRaises(grpc.RpcError):
+ for _ in call:
+ pass
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+ received_initial_metadata))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+ call.trailing_metadata()))
+ self.assertIs(_NON_OK_CODE, call.code())
+ self.assertEqual(_DETAILS, call.details())
+
+ def testCustomCodeStreamUnary(self):
+ self._servicer.set_code(_NON_OK_CODE)
+ self._servicer.set_details(_DETAILS)
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ self._stream_unary.with_call(
+ iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
+ metadata=_CLIENT_METADATA)
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _SERVER_INITIAL_METADATA,
+ exception_context.exception.initial_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _SERVER_TRAILING_METADATA,
+ exception_context.exception.trailing_metadata()))
+ self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+ self.assertEqual(_DETAILS, exception_context.exception.details())
+
+ def testCustomCodeStreamStream(self):
+ self._servicer.set_code(_NON_OK_CODE)
+ self._servicer.set_details(_DETAILS)
+
+ call = self._stream_stream(
+ iter([object()] * test_constants.STREAM_LENGTH),
+ metadata=_CLIENT_METADATA)
+ received_initial_metadata = call.initial_metadata()
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ for _ in call:
+ pass
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+ received_initial_metadata))
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _SERVER_TRAILING_METADATA,
+ exception_context.exception.trailing_metadata()))
+ self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+ self.assertEqual(_DETAILS, exception_context.exception.details())
+
+ def testCustomCodeExceptionUnaryUnary(self):
+ self._servicer.set_code(_NON_OK_CODE)
+ self._servicer.set_details(_DETAILS)
+ self._servicer.set_exception()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _SERVER_INITIAL_METADATA,
+ exception_context.exception.initial_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _SERVER_TRAILING_METADATA,
+ exception_context.exception.trailing_metadata()))
+ self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+ self.assertEqual(_DETAILS, exception_context.exception.details())
+
+ def testCustomCodeExceptionUnaryStream(self):
+ self._servicer.set_code(_NON_OK_CODE)
+ self._servicer.set_details(_DETAILS)
+ self._servicer.set_exception()
+
+ call = self._unary_stream(
+ _SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
+ received_initial_metadata = call.initial_metadata()
+ with self.assertRaises(grpc.RpcError):
+ for _ in call:
+ pass
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+ received_initial_metadata))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+ call.trailing_metadata()))
+ self.assertIs(_NON_OK_CODE, call.code())
+ self.assertEqual(_DETAILS, call.details())
+
+ def testCustomCodeExceptionStreamUnary(self):
+ self._servicer.set_code(_NON_OK_CODE)
+ self._servicer.set_details(_DETAILS)
+ self._servicer.set_exception()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ self._stream_unary.with_call(
+ iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
+ metadata=_CLIENT_METADATA)
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _SERVER_INITIAL_METADATA,
+ exception_context.exception.initial_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _SERVER_TRAILING_METADATA,
+ exception_context.exception.trailing_metadata()))
+ self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+ self.assertEqual(_DETAILS, exception_context.exception.details())
+
+ def testCustomCodeExceptionStreamStream(self):
+ self._servicer.set_code(_NON_OK_CODE)
+ self._servicer.set_details(_DETAILS)
+ self._servicer.set_exception()
+
+ call = self._stream_stream(
+ iter([object()] * test_constants.STREAM_LENGTH),
+ metadata=_CLIENT_METADATA)
+ received_initial_metadata = call.initial_metadata()
+ with self.assertRaises(grpc.RpcError):
+ for _ in call:
+ pass
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+ received_initial_metadata))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+ call.trailing_metadata()))
+ self.assertIs(_NON_OK_CODE, call.code())
+ self.assertEqual(_DETAILS, call.details())
+
+ def testCustomCodeReturnNoneUnaryUnary(self):
+ self._servicer.set_code(_NON_OK_CODE)
+ self._servicer.set_details(_DETAILS)
+ self._servicer.set_return_none()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _SERVER_INITIAL_METADATA,
+ exception_context.exception.initial_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _SERVER_TRAILING_METADATA,
+ exception_context.exception.trailing_metadata()))
+ self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+ self.assertEqual(_DETAILS, exception_context.exception.details())
+
+ def testCustomCodeReturnNoneStreamUnary(self):
+ self._servicer.set_code(_NON_OK_CODE)
+ self._servicer.set_details(_DETAILS)
+ self._servicer.set_return_none()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ self._stream_unary.with_call(
+ iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
+ metadata=_CLIENT_METADATA)
+
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, self._servicer.received_client_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _SERVER_INITIAL_METADATA,
+ exception_context.exception.initial_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(
+ _SERVER_TRAILING_METADATA,
+ exception_context.exception.trailing_metadata()))
+ self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+ self.assertEqual(_DETAILS, exception_context.exception.details())
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
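The reformatted test above wires its servicer in through grpc.method_handlers_generic_handler and then asserts that a status code and details string set on the servicer context surface on the client, either on the returned call or on the raised grpc.RpcError. A self-contained sketch of that round trip under assumed names ('demo.Service', 'Fail'), not drawn from this patch:

import grpc
from concurrent import futures


def _fail(request, context):
    # Mirror the servicer pattern above: trailing metadata, non-OK code, details.
    context.set_trailing_metadata(
        (('server-trailing-md-key', 'server-trailing-md-value'),))
    context.set_code(grpc.StatusCode.NOT_FOUND)
    context.set_details('Test details!')
    return b''


handler = grpc.method_handlers_generic_handler(
    'demo.Service', {'Fail': grpc.unary_unary_rpc_method_handler(_fail)})
server = grpc.server(
    futures.ThreadPoolExecutor(max_workers=1), handlers=(handler,))
port = server.add_insecure_port('[::]:0')
server.start()

multi_callable = grpc.insecure_channel('localhost:%d' % port).unary_unary(
    '/demo.Service/Fail')
try:
    multi_callable(b'')
except grpc.RpcError as error:
    # The raised RpcError is also a Call, so code(), details(), and the
    # metadata accessors used by the assertions above are available on it.
    assert error.code() is grpc.StatusCode.NOT_FOUND
    assert error.details() == 'Test details!'
server.stop(0)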
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_test.py b/src/python/grpcio_tests/tests/unit/_metadata_test.py
index caba53ffcc..53fe7ba8aa 100644
--- a/src/python/grpcio_tests/tests/unit/_metadata_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_test.py
@@ -51,166 +51,174 @@ _STREAM_STREAM = '/test/StreamStream'
_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__)
-_CLIENT_METADATA = (
- ('client-md-key', 'client-md-key'),
- ('client-md-key-bin', b'\x00\x01')
-)
+_CLIENT_METADATA = (('client-md-key', 'client-md-key'),
+ ('client-md-key-bin', b'\x00\x01'))
_SERVER_INITIAL_METADATA = (
('server-initial-md-key', 'server-initial-md-value'),
- ('server-initial-md-key-bin', b'\x00\x02')
-)
+ ('server-initial-md-key-bin', b'\x00\x02'))
_SERVER_TRAILING_METADATA = (
('server-trailing-md-key', 'server-trailing-md-value'),
- ('server-trailing-md-key-bin', b'\x00\x03')
-)
+ ('server-trailing-md-key-bin', b'\x00\x03'))
def user_agent(metadata):
- for key, val in metadata:
- if key == 'user-agent':
- return val
- raise KeyError('No user agent!')
+ for key, val in metadata:
+ if key == 'user-agent':
+ return val
+ raise KeyError('No user agent!')
def validate_client_metadata(test, servicer_context):
- test.assertTrue(test_common.metadata_transmitted(
- _CLIENT_METADATA, servicer_context.invocation_metadata()))
- test.assertTrue(user_agent(servicer_context.invocation_metadata())
- .startswith('primary-agent ' + _USER_AGENT))
- test.assertTrue(user_agent(servicer_context.invocation_metadata())
- .endswith('secondary-agent'))
+ test.assertTrue(
+ test_common.metadata_transmitted(
+ _CLIENT_METADATA, servicer_context.invocation_metadata()))
+ test.assertTrue(
+ user_agent(servicer_context.invocation_metadata())
+ .startswith('primary-agent ' + _USER_AGENT))
+ test.assertTrue(
+ user_agent(servicer_context.invocation_metadata())
+ .endswith('secondary-agent'))
def handle_unary_unary(test, request, servicer_context):
- validate_client_metadata(test, servicer_context)
- servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
- servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
- return _RESPONSE
+ validate_client_metadata(test, servicer_context)
+ servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+ servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+ return _RESPONSE
def handle_unary_stream(test, request, servicer_context):
- validate_client_metadata(test, servicer_context)
- servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
- servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
- for _ in range(test_constants.STREAM_LENGTH):
- yield _RESPONSE
+ validate_client_metadata(test, servicer_context)
+ servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+ servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+ for _ in range(test_constants.STREAM_LENGTH):
+ yield _RESPONSE
def handle_stream_unary(test, request_iterator, servicer_context):
- validate_client_metadata(test, servicer_context)
- servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
- servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
- # TODO(issue:#6891) We should be able to remove this loop
- for request in request_iterator:
- pass
- return _RESPONSE
+ validate_client_metadata(test, servicer_context)
+ servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+ servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+ # TODO(issue:#6891) We should be able to remove this loop
+ for request in request_iterator:
+ pass
+ return _RESPONSE
def handle_stream_stream(test, request_iterator, servicer_context):
- validate_client_metadata(test, servicer_context)
- servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
- servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
- # TODO(issue:#6891) We should be able to remove this loop,
- # and replace with return; yield
- for request in request_iterator:
- yield _RESPONSE
+ validate_client_metadata(test, servicer_context)
+ servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+ servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+ # TODO(issue:#6891) We should be able to remove this loop,
+ # and replace with return; yield
+ for request in request_iterator:
+ yield _RESPONSE
class _MethodHandler(grpc.RpcMethodHandler):
- def __init__(self, test, request_streaming, response_streaming):
- self.request_streaming = request_streaming
- self.response_streaming = response_streaming
- self.request_deserializer = None
- self.response_serializer = None
- self.unary_unary = None
- self.unary_stream = None
- self.stream_unary = None
- self.stream_stream = None
- if self.request_streaming and self.response_streaming:
- self.stream_stream = lambda x, y: handle_stream_stream(test, x, y)
- elif self.request_streaming:
- self.stream_unary = lambda x, y: handle_stream_unary(test, x, y)
- elif self.response_streaming:
- self.unary_stream = lambda x, y: handle_unary_stream(test, x, y)
- else:
- self.unary_unary = lambda x, y: handle_unary_unary(test, x, y)
+ def __init__(self, test, request_streaming, response_streaming):
+ self.request_streaming = request_streaming
+ self.response_streaming = response_streaming
+ self.request_deserializer = None
+ self.response_serializer = None
+ self.unary_unary = None
+ self.unary_stream = None
+ self.stream_unary = None
+ self.stream_stream = None
+ if self.request_streaming and self.response_streaming:
+ self.stream_stream = lambda x, y: handle_stream_stream(test, x, y)
+ elif self.request_streaming:
+ self.stream_unary = lambda x, y: handle_stream_unary(test, x, y)
+ elif self.response_streaming:
+ self.unary_stream = lambda x, y: handle_unary_stream(test, x, y)
+ else:
+ self.unary_unary = lambda x, y: handle_unary_unary(test, x, y)
class _GenericHandler(grpc.GenericRpcHandler):
- def __init__(self, test):
- self._test = test
+ def __init__(self, test):
+ self._test = test
- def service(self, handler_call_details):
- if handler_call_details.method == _UNARY_UNARY:
- return _MethodHandler(self._test, False, False)
- elif handler_call_details.method == _UNARY_STREAM:
- return _MethodHandler(self._test, False, True)
- elif handler_call_details.method == _STREAM_UNARY:
- return _MethodHandler(self._test, True, False)
- elif handler_call_details.method == _STREAM_STREAM:
- return _MethodHandler(self._test, True, True)
- else:
- return None
+ def service(self, handler_call_details):
+ if handler_call_details.method == _UNARY_UNARY:
+ return _MethodHandler(self._test, False, False)
+ elif handler_call_details.method == _UNARY_STREAM:
+ return _MethodHandler(self._test, False, True)
+ elif handler_call_details.method == _STREAM_UNARY:
+ return _MethodHandler(self._test, True, False)
+ elif handler_call_details.method == _STREAM_STREAM:
+ return _MethodHandler(self._test, True, True)
+ else:
+ return None
class MetadataTest(unittest.TestCase):
- def setUp(self):
- self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- self._server = grpc.server(
- self._server_pool, handlers=(_GenericHandler(weakref.proxy(self)),))
- port = self._server.add_insecure_port('[::]:0')
- self._server.start()
- self._channel = grpc.insecure_channel('localhost:%d' % port,
- options=_CHANNEL_ARGS)
-
- def tearDown(self):
- self._server.stop(0)
-
- def testUnaryUnary(self):
- multi_callable = self._channel.unary_unary(_UNARY_UNARY)
- unused_response, call = multi_callable.with_call(
- _REQUEST, metadata=_CLIENT_METADATA)
- self.assertTrue(test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA, call.initial_metadata()))
- self.assertTrue(test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA, call.trailing_metadata()))
-
- def testUnaryStream(self):
- multi_callable = self._channel.unary_stream(_UNARY_STREAM)
- call = multi_callable(_REQUEST, metadata=_CLIENT_METADATA)
- self.assertTrue(test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA, call.initial_metadata()))
- for _ in call:
- pass
- self.assertTrue(test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA, call.trailing_metadata()))
-
- def testStreamUnary(self):
- multi_callable = self._channel.stream_unary(_STREAM_UNARY)
- unused_response, call = multi_callable.with_call(
- iter([_REQUEST] * test_constants.STREAM_LENGTH),
- metadata=_CLIENT_METADATA)
- self.assertTrue(test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA, call.initial_metadata()))
- self.assertTrue(test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA, call.trailing_metadata()))
-
- def testStreamStream(self):
- multi_callable = self._channel.stream_stream(_STREAM_STREAM)
- call = multi_callable(iter([_REQUEST] * test_constants.STREAM_LENGTH),
- metadata=_CLIENT_METADATA)
- self.assertTrue(test_common.metadata_transmitted(
- _SERVER_INITIAL_METADATA, call.initial_metadata()))
- for _ in call:
- pass
- self.assertTrue(test_common.metadata_transmitted(
- _SERVER_TRAILING_METADATA, call.trailing_metadata()))
+ def setUp(self):
+ self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ self._server = grpc.server(
+ self._server_pool, handlers=(_GenericHandler(weakref.proxy(self)),))
+ port = self._server.add_insecure_port('[::]:0')
+ self._server.start()
+ self._channel = grpc.insecure_channel(
+ 'localhost:%d' % port, options=_CHANNEL_ARGS)
+
+ def tearDown(self):
+ self._server.stop(0)
+
+ def testUnaryUnary(self):
+ multi_callable = self._channel.unary_unary(_UNARY_UNARY)
+ unused_response, call = multi_callable.with_call(
+ _REQUEST, metadata=_CLIENT_METADATA)
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+ call.initial_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+ call.trailing_metadata()))
+
+ def testUnaryStream(self):
+ multi_callable = self._channel.unary_stream(_UNARY_STREAM)
+ call = multi_callable(_REQUEST, metadata=_CLIENT_METADATA)
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+ call.initial_metadata()))
+ for _ in call:
+ pass
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+ call.trailing_metadata()))
+
+ def testStreamUnary(self):
+ multi_callable = self._channel.stream_unary(_STREAM_UNARY)
+ unused_response, call = multi_callable.with_call(
+ iter([_REQUEST] * test_constants.STREAM_LENGTH),
+ metadata=_CLIENT_METADATA)
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+ call.initial_metadata()))
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+ call.trailing_metadata()))
+
+ def testStreamStream(self):
+ multi_callable = self._channel.stream_stream(_STREAM_STREAM)
+ call = multi_callable(
+ iter([_REQUEST] * test_constants.STREAM_LENGTH),
+ metadata=_CLIENT_METADATA)
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+ call.initial_metadata()))
+ for _ in call:
+ pass
+ self.assertTrue(
+ test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+ call.trailing_metadata()))
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
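Before the _rpc_test.py hunks, one more illustrative sketch that is not part of this patch: the metadata test above verifies transmission of client, server-initial, and server-trailing metadata, where keys ending in '-bin' carry raw bytes values. A minimal hypothetical round trip for the trailing case, with assumed service and method names:

import grpc
from concurrent import futures

_TRAILING = (('server-trailing-md-key', 'server-trailing-md-value'),
             ('server-trailing-md-key-bin', b'\x00\x03'))  # '-bin' keys hold bytes


def _echo(request, context):
    context.set_trailing_metadata(_TRAILING)
    return request


server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
server.add_generic_rpc_handlers((grpc.method_handlers_generic_handler(
    'demo.Service', {'Echo': grpc.unary_unary_rpc_method_handler(_echo)}),))
port = server.add_insecure_port('[::]:0')
server.start()

multi_callable = grpc.insecure_channel('localhost:%d' % port).unary_unary(
    '/demo.Service/Echo')
response, call = multi_callable.with_call(
    b'abc', metadata=(('client-md-key', 'client-md-value'),))
assert dict(call.trailing_metadata())['server-trailing-md-key-bin'] == b'\x00\x03'
server.stop(0)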
diff --git a/src/python/grpcio_tests/tests/unit/_rpc_test.py b/src/python/grpcio_tests/tests/unit/_rpc_test.py
index eb00156da5..2cf6dfea62 100644
--- a/src/python/grpcio_tests/tests/unit/_rpc_test.py
+++ b/src/python/grpcio_tests/tests/unit/_rpc_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Test of RPCs made against gRPC Python's application-layer API."""
import itertools
@@ -53,742 +52,797 @@ _STREAM_STREAM = '/test/StreamStream'
class _Callback(object):
- def __init__(self):
- self._condition = threading.Condition()
- self._value = None
- self._called = False
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._value = None
+ self._called = False
- def __call__(self, value):
- with self._condition:
- self._value = value
- self._called = True
- self._condition.notify_all()
+ def __call__(self, value):
+ with self._condition:
+ self._value = value
+ self._called = True
+ self._condition.notify_all()
- def value(self):
- with self._condition:
- while not self._called:
- self._condition.wait()
- return self._value
+ def value(self):
+ with self._condition:
+ while not self._called:
+ self._condition.wait()
+ return self._value
class _Handler(object):
- def __init__(self, control):
- self._control = control
-
- def handle_unary_unary(self, request, servicer_context):
- self._control.control()
- if servicer_context is not None:
- servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
- return request
-
- def handle_unary_stream(self, request, servicer_context):
- for _ in range(test_constants.STREAM_LENGTH):
- self._control.control()
- yield request
- self._control.control()
- if servicer_context is not None:
- servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
-
- def handle_stream_unary(self, request_iterator, servicer_context):
- if servicer_context is not None:
- servicer_context.invocation_metadata()
- self._control.control()
- response_elements = []
- for request in request_iterator:
- self._control.control()
- response_elements.append(request)
- self._control.control()
- if servicer_context is not None:
- servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
- return b''.join(response_elements)
-
- def handle_stream_stream(self, request_iterator, servicer_context):
- self._control.control()
- if servicer_context is not None:
- servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
- for request in request_iterator:
- self._control.control()
- yield request
- self._control.control()
+ def __init__(self, control):
+ self._control = control
+
+ def handle_unary_unary(self, request, servicer_context):
+ self._control.control()
+ if servicer_context is not None:
+ servicer_context.set_trailing_metadata(((
+ 'testkey',
+ 'testvalue',),))
+ return request
+
+ def handle_unary_stream(self, request, servicer_context):
+ for _ in range(test_constants.STREAM_LENGTH):
+ self._control.control()
+ yield request
+ self._control.control()
+ if servicer_context is not None:
+ servicer_context.set_trailing_metadata(((
+ 'testkey',
+ 'testvalue',),))
+
+ def handle_stream_unary(self, request_iterator, servicer_context):
+ if servicer_context is not None:
+ servicer_context.invocation_metadata()
+ self._control.control()
+ response_elements = []
+ for request in request_iterator:
+ self._control.control()
+ response_elements.append(request)
+ self._control.control()
+ if servicer_context is not None:
+ servicer_context.set_trailing_metadata(((
+ 'testkey',
+ 'testvalue',),))
+ return b''.join(response_elements)
+
+ def handle_stream_stream(self, request_iterator, servicer_context):
+ self._control.control()
+ if servicer_context is not None:
+ servicer_context.set_trailing_metadata(((
+ 'testkey',
+ 'testvalue',),))
+ for request in request_iterator:
+ self._control.control()
+ yield request
+ self._control.control()
class _MethodHandler(grpc.RpcMethodHandler):
- def __init__(
- self, request_streaming, response_streaming, request_deserializer,
- response_serializer, unary_unary, unary_stream, stream_unary,
- stream_stream):
- self.request_streaming = request_streaming
- self.response_streaming = response_streaming
- self.request_deserializer = request_deserializer
- self.response_serializer = response_serializer
- self.unary_unary = unary_unary
- self.unary_stream = unary_stream
- self.stream_unary = stream_unary
- self.stream_stream = stream_stream
+ def __init__(self, request_streaming, response_streaming,
+ request_deserializer, response_serializer, unary_unary,
+ unary_stream, stream_unary, stream_stream):
+ self.request_streaming = request_streaming
+ self.response_streaming = response_streaming
+ self.request_deserializer = request_deserializer
+ self.response_serializer = response_serializer
+ self.unary_unary = unary_unary
+ self.unary_stream = unary_stream
+ self.stream_unary = stream_unary
+ self.stream_stream = stream_stream
class _GenericHandler(grpc.GenericRpcHandler):
- def __init__(self, handler):
- self._handler = handler
-
- def service(self, handler_call_details):
- if handler_call_details.method == _UNARY_UNARY:
- return _MethodHandler(
- False, False, None, None, self._handler.handle_unary_unary, None,
- None, None)
- elif handler_call_details.method == _UNARY_STREAM:
- return _MethodHandler(
- False, True, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None,
- self._handler.handle_unary_stream, None, None)
- elif handler_call_details.method == _STREAM_UNARY:
- return _MethodHandler(
- True, False, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None, None,
- self._handler.handle_stream_unary, None)
- elif handler_call_details.method == _STREAM_STREAM:
- return _MethodHandler(
- True, True, None, None, None, None, None,
- self._handler.handle_stream_stream)
- else:
- return None
+ def __init__(self, handler):
+ self._handler = handler
+
+ def service(self, handler_call_details):
+ if handler_call_details.method == _UNARY_UNARY:
+ return _MethodHandler(False, False, None, None,
+ self._handler.handle_unary_unary, None, None,
+ None)
+ elif handler_call_details.method == _UNARY_STREAM:
+ return _MethodHandler(False, True, _DESERIALIZE_REQUEST,
+ _SERIALIZE_RESPONSE, None,
+ self._handler.handle_unary_stream, None, None)
+ elif handler_call_details.method == _STREAM_UNARY:
+ return _MethodHandler(True, False, _DESERIALIZE_REQUEST,
+ _SERIALIZE_RESPONSE, None, None,
+ self._handler.handle_stream_unary, None)
+ elif handler_call_details.method == _STREAM_STREAM:
+ return _MethodHandler(True, True, None, None, None, None, None,
+ self._handler.handle_stream_stream)
+ else:
+ return None
def _unary_unary_multi_callable(channel):
- return channel.unary_unary(_UNARY_UNARY)
+ return channel.unary_unary(_UNARY_UNARY)
def _unary_stream_multi_callable(channel):
- return channel.unary_stream(
- _UNARY_STREAM,
- request_serializer=_SERIALIZE_REQUEST,
- response_deserializer=_DESERIALIZE_RESPONSE)
+ return channel.unary_stream(
+ _UNARY_STREAM,
+ request_serializer=_SERIALIZE_REQUEST,
+ response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_unary_multi_callable(channel):
- return channel.stream_unary(
- _STREAM_UNARY,
- request_serializer=_SERIALIZE_REQUEST,
- response_deserializer=_DESERIALIZE_RESPONSE)
+ return channel.stream_unary(
+ _STREAM_UNARY,
+ request_serializer=_SERIALIZE_REQUEST,
+ response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_stream_multi_callable(channel):
- return channel.stream_stream(_STREAM_STREAM)
+ return channel.stream_stream(_STREAM_STREAM)
class RPCTest(unittest.TestCase):
- def setUp(self):
- self._control = test_control.PauseFailControl()
- self._handler = _Handler(self._control)
- self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ def setUp(self):
+ self._control = test_control.PauseFailControl()
+ self._handler = _Handler(self._control)
+ self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- self._server = grpc.server(self._server_pool)
- port = self._server.add_insecure_port('[::]:0')
- self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
- self._server.start()
+ self._server = grpc.server(self._server_pool)
+ port = self._server.add_insecure_port('[::]:0')
+ self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
+ self._server.start()
- self._channel = grpc.insecure_channel('localhost:%d' % port)
-
- def tearDown(self):
- self._server.stop(None)
- self._server_pool.shutdown(wait=True)
-
- def testUnrecognizedMethod(self):
- request = b'abc'
-
- with self.assertRaises(grpc.RpcError) as exception_context:
- self._channel.unary_unary('NoSuchMethod')(request)
-
- self.assertEqual(
- grpc.StatusCode.UNIMPLEMENTED, exception_context.exception.code())
-
- def testSuccessfulUnaryRequestBlockingUnaryResponse(self):
- request = b'\x07\x08'
- expected_response = self._handler.handle_unary_unary(request, None)
-
- multi_callable = _unary_unary_multi_callable(self._channel)
- response = multi_callable(
- request, metadata=(
- ('test', 'SuccessfulUnaryRequestBlockingUnaryResponse'),))
-
- self.assertEqual(expected_response, response)
-
- def testSuccessfulUnaryRequestBlockingUnaryResponseWithCall(self):
- request = b'\x07\x08'
- expected_response = self._handler.handle_unary_unary(request, None)
-
- multi_callable = _unary_unary_multi_callable(self._channel)
- response, call = multi_callable.with_call(
- request, metadata=(
- ('test', 'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),))
-
- self.assertEqual(expected_response, response)
- self.assertIs(grpc.StatusCode.OK, call.code())
-
- def testSuccessfulUnaryRequestFutureUnaryResponse(self):
- request = b'\x07\x08'
- expected_response = self._handler.handle_unary_unary(request, None)
-
- multi_callable = _unary_unary_multi_callable(self._channel)
- response_future = multi_callable.future(
- request, metadata=(
- ('test', 'SuccessfulUnaryRequestFutureUnaryResponse'),))
- response = response_future.result()
-
- self.assertIsInstance(response_future, grpc.Future)
- self.assertIsInstance(response_future, grpc.Call)
- self.assertEqual(expected_response, response)
- self.assertIsNone(response_future.exception())
- self.assertIsNone(response_future.traceback())
-
- def testSuccessfulUnaryRequestStreamResponse(self):
- request = b'\x37\x58'
- expected_responses = tuple(self._handler.handle_unary_stream(request, None))
-
- multi_callable = _unary_stream_multi_callable(self._channel)
- response_iterator = multi_callable(
- request,
- metadata=(('test', 'SuccessfulUnaryRequestStreamResponse'),))
- responses = tuple(response_iterator)
-
- self.assertSequenceEqual(expected_responses, responses)
-
- def testSuccessfulStreamRequestBlockingUnaryResponse(self):
- requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
- expected_response = self._handler.handle_stream_unary(iter(requests), None)
- request_iterator = iter(requests)
-
- multi_callable = _stream_unary_multi_callable(self._channel)
- response = multi_callable(
- request_iterator,
- metadata=(('test', 'SuccessfulStreamRequestBlockingUnaryResponse'),))
-
- self.assertEqual(expected_response, response)
-
- def testSuccessfulStreamRequestBlockingUnaryResponseWithCall(self):
- requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
- expected_response = self._handler.handle_stream_unary(iter(requests), None)
- request_iterator = iter(requests)
-
- multi_callable = _stream_unary_multi_callable(self._channel)
- response, call = multi_callable.with_call(
- request_iterator,
- metadata=(
- ('test', 'SuccessfulStreamRequestBlockingUnaryResponseWithCall'),
- ))
-
- self.assertEqual(expected_response, response)
- self.assertIs(grpc.StatusCode.OK, call.code())
-
- def testSuccessfulStreamRequestFutureUnaryResponse(self):
- requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
- expected_response = self._handler.handle_stream_unary(iter(requests), None)
- request_iterator = iter(requests)
-
- multi_callable = _stream_unary_multi_callable(self._channel)
- response_future = multi_callable.future(
- request_iterator,
- metadata=(
- ('test', 'SuccessfulStreamRequestFutureUnaryResponse'),))
- response = response_future.result()
-
- self.assertEqual(expected_response, response)
- self.assertIsNone(response_future.exception())
- self.assertIsNone(response_future.traceback())
-
- def testSuccessfulStreamRequestStreamResponse(self):
- requests = tuple(b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH))
- expected_responses = tuple(
- self._handler.handle_stream_stream(iter(requests), None))
- request_iterator = iter(requests)
-
- multi_callable = _stream_stream_multi_callable(self._channel)
- response_iterator = multi_callable(
- request_iterator,
- metadata=(('test', 'SuccessfulStreamRequestStreamResponse'),))
- responses = tuple(response_iterator)
-
- self.assertSequenceEqual(expected_responses, responses)
-
- def testSequentialInvocations(self):
- first_request = b'\x07\x08'
- second_request = b'\x0809'
- expected_first_response = self._handler.handle_unary_unary(
- first_request, None)
- expected_second_response = self._handler.handle_unary_unary(
- second_request, None)
-
- multi_callable = _unary_unary_multi_callable(self._channel)
- first_response = multi_callable(
- first_request, metadata=(('test', 'SequentialInvocations'),))
- second_response = multi_callable(
- second_request, metadata=(('test', 'SequentialInvocations'),))
-
- self.assertEqual(expected_first_response, first_response)
- self.assertEqual(expected_second_response, second_response)
-
- def testConcurrentBlockingInvocations(self):
- pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
- expected_response = self._handler.handle_stream_unary(iter(requests), None)
- expected_responses = [expected_response] * test_constants.THREAD_CONCURRENCY
- response_futures = [None] * test_constants.THREAD_CONCURRENCY
-
- multi_callable = _stream_unary_multi_callable(self._channel)
- for index in range(test_constants.THREAD_CONCURRENCY):
- request_iterator = iter(requests)
- response_future = pool.submit(
- multi_callable, request_iterator,
- metadata=(('test', 'ConcurrentBlockingInvocations'),))
- response_futures[index] = response_future
- responses = tuple(
- response_future.result() for response_future in response_futures)
-
- pool.shutdown(wait=True)
- self.assertSequenceEqual(expected_responses, responses)
-
- def testConcurrentFutureInvocations(self):
- requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
- expected_response = self._handler.handle_stream_unary(iter(requests), None)
- expected_responses = [expected_response] * test_constants.THREAD_CONCURRENCY
- response_futures = [None] * test_constants.THREAD_CONCURRENCY
-
- multi_callable = _stream_unary_multi_callable(self._channel)
- for index in range(test_constants.THREAD_CONCURRENCY):
- request_iterator = iter(requests)
- response_future = multi_callable.future(
- request_iterator,
- metadata=(('test', 'ConcurrentFutureInvocations'),))
- response_futures[index] = response_future
- responses = tuple(
- response_future.result() for response_future in response_futures)
-
- self.assertSequenceEqual(expected_responses, responses)
-
- def testWaitingForSomeButNotAllConcurrentFutureInvocations(self):
- pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- request = b'\x67\x68'
- expected_response = self._handler.handle_unary_unary(request, None)
- response_futures = [None] * test_constants.THREAD_CONCURRENCY
- lock = threading.Lock()
- test_is_running_cell = [True]
- def wrap_future(future):
- def wrap():
- try:
- return future.result()
- except grpc.RpcError:
- with lock:
- if test_is_running_cell[0]:
- raise
- return None
- return wrap
-
- multi_callable = _unary_unary_multi_callable(self._channel)
- for index in range(test_constants.THREAD_CONCURRENCY):
- inner_response_future = multi_callable.future(
- request,
- metadata=(
- ('test',
- 'WaitingForSomeButNotAllConcurrentFutureInvocations'),))
- outer_response_future = pool.submit(wrap_future(inner_response_future))
- response_futures[index] = outer_response_future
-
- some_completed_response_futures_iterator = itertools.islice(
- futures.as_completed(response_futures),
- test_constants.THREAD_CONCURRENCY // 2)
- for response_future in some_completed_response_futures_iterator:
- self.assertEqual(expected_response, response_future.result())
- with lock:
- test_is_running_cell[0] = False
-
- def testConsumingOneStreamResponseUnaryRequest(self):
- request = b'\x57\x38'
-
- multi_callable = _unary_stream_multi_callable(self._channel)
- response_iterator = multi_callable(
- request,
- metadata=(
- ('test', 'ConsumingOneStreamResponseUnaryRequest'),))
- next(response_iterator)
-
- def testConsumingSomeButNotAllStreamResponsesUnaryRequest(self):
- request = b'\x57\x38'
-
- multi_callable = _unary_stream_multi_callable(self._channel)
- response_iterator = multi_callable(
- request,
- metadata=(
- ('test', 'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),))
- for _ in range(test_constants.STREAM_LENGTH // 2):
- next(response_iterator)
-
- def testConsumingSomeButNotAllStreamResponsesStreamRequest(self):
- requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
- request_iterator = iter(requests)
-
- multi_callable = _stream_stream_multi_callable(self._channel)
- response_iterator = multi_callable(
- request_iterator,
- metadata=(
- ('test', 'ConsumingSomeButNotAllStreamResponsesStreamRequest'),))
- for _ in range(test_constants.STREAM_LENGTH // 2):
- next(response_iterator)
-
- def testConsumingTooManyStreamResponsesStreamRequest(self):
- requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
- request_iterator = iter(requests)
-
- multi_callable = _stream_stream_multi_callable(self._channel)
- response_iterator = multi_callable(
- request_iterator,
- metadata=(
- ('test', 'ConsumingTooManyStreamResponsesStreamRequest'),))
- for _ in range(test_constants.STREAM_LENGTH):
- next(response_iterator)
- for _ in range(test_constants.STREAM_LENGTH):
- with self.assertRaises(StopIteration):
- next(response_iterator)
+ self._channel = grpc.insecure_channel('localhost:%d' % port)
- self.assertIsNotNone(response_iterator.initial_metadata())
- self.assertIs(grpc.StatusCode.OK, response_iterator.code())
- self.assertIsNotNone(response_iterator.details())
- self.assertIsNotNone(response_iterator.trailing_metadata())
-
- def testCancelledUnaryRequestUnaryResponse(self):
- request = b'\x07\x17'
-
- multi_callable = _unary_unary_multi_callable(self._channel)
- with self._control.pause():
- response_future = multi_callable.future(
- request,
- metadata=(('test', 'CancelledUnaryRequestUnaryResponse'),))
- response_future.cancel()
-
- self.assertTrue(response_future.cancelled())
- with self.assertRaises(grpc.FutureCancelledError):
- response_future.result()
- with self.assertRaises(grpc.FutureCancelledError):
- response_future.exception()
- with self.assertRaises(grpc.FutureCancelledError):
- response_future.traceback()
- self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
-
- def testCancelledUnaryRequestStreamResponse(self):
- request = b'\x07\x19'
-
- multi_callable = _unary_stream_multi_callable(self._channel)
- with self._control.pause():
- response_iterator = multi_callable(
- request,
- metadata=(('test', 'CancelledUnaryRequestStreamResponse'),))
- self._control.block_until_paused()
- response_iterator.cancel()
-
- with self.assertRaises(grpc.RpcError) as exception_context:
- next(response_iterator)
- self.assertIs(grpc.StatusCode.CANCELLED, exception_context.exception.code())
- self.assertIsNotNone(response_iterator.initial_metadata())
- self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
- self.assertIsNotNone(response_iterator.details())
- self.assertIsNotNone(response_iterator.trailing_metadata())
-
- def testCancelledStreamRequestUnaryResponse(self):
- requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
- request_iterator = iter(requests)
-
- multi_callable = _stream_unary_multi_callable(self._channel)
- with self._control.pause():
- response_future = multi_callable.future(
- request_iterator,
- metadata=(('test', 'CancelledStreamRequestUnaryResponse'),))
- self._control.block_until_paused()
- response_future.cancel()
-
- self.assertTrue(response_future.cancelled())
- with self.assertRaises(grpc.FutureCancelledError):
- response_future.result()
- with self.assertRaises(grpc.FutureCancelledError):
- response_future.exception()
- with self.assertRaises(grpc.FutureCancelledError):
- response_future.traceback()
- self.assertIsNotNone(response_future.initial_metadata())
- self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
- self.assertIsNotNone(response_future.details())
- self.assertIsNotNone(response_future.trailing_metadata())
-
- def testCancelledStreamRequestStreamResponse(self):
- requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
- request_iterator = iter(requests)
-
- multi_callable = _stream_stream_multi_callable(self._channel)
- with self._control.pause():
- response_iterator = multi_callable(
- request_iterator,
- metadata=(('test', 'CancelledStreamRequestStreamResponse'),))
- response_iterator.cancel()
-
- with self.assertRaises(grpc.RpcError):
- next(response_iterator)
- self.assertIsNotNone(response_iterator.initial_metadata())
- self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
- self.assertIsNotNone(response_iterator.details())
- self.assertIsNotNone(response_iterator.trailing_metadata())
-
- def testExpiredUnaryRequestBlockingUnaryResponse(self):
- request = b'\x07\x17'
-
- multi_callable = _unary_unary_multi_callable(self._channel)
- with self._control.pause():
- with self.assertRaises(grpc.RpcError) as exception_context:
- multi_callable.with_call(
- request, timeout=test_constants.SHORT_TIMEOUT,
- metadata=(('test', 'ExpiredUnaryRequestBlockingUnaryResponse'),))
-
- self.assertIsInstance(exception_context.exception, grpc.Call)
- self.assertIsNotNone(exception_context.exception.initial_metadata())
- self.assertIs(
- grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
- self.assertIsNotNone(exception_context.exception.details())
- self.assertIsNotNone(exception_context.exception.trailing_metadata())
-
- def testExpiredUnaryRequestFutureUnaryResponse(self):
- request = b'\x07\x17'
- callback = _Callback()
-
- multi_callable = _unary_unary_multi_callable(self._channel)
- with self._control.pause():
- response_future = multi_callable.future(
- request, timeout=test_constants.SHORT_TIMEOUT,
- metadata=(('test', 'ExpiredUnaryRequestFutureUnaryResponse'),))
- response_future.add_done_callback(callback)
- value_passed_to_callback = callback.value()
-
- self.assertIs(response_future, value_passed_to_callback)
- self.assertIsNotNone(response_future.initial_metadata())
- self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
- self.assertIsNotNone(response_future.details())
- self.assertIsNotNone(response_future.trailing_metadata())
- with self.assertRaises(grpc.RpcError) as exception_context:
- response_future.result()
- self.assertIs(
- grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
- self.assertIsInstance(response_future.exception(), grpc.RpcError)
- self.assertIsNotNone(response_future.traceback())
- self.assertIs(
- grpc.StatusCode.DEADLINE_EXCEEDED, response_future.exception().code())
-
- def testExpiredUnaryRequestStreamResponse(self):
- request = b'\x07\x19'
-
- multi_callable = _unary_stream_multi_callable(self._channel)
- with self._control.pause():
- with self.assertRaises(grpc.RpcError) as exception_context:
- response_iterator = multi_callable(
- request, timeout=test_constants.SHORT_TIMEOUT,
- metadata=(('test', 'ExpiredUnaryRequestStreamResponse'),))
- next(response_iterator)
+ def tearDown(self):
+ self._server.stop(None)
+ self._server_pool.shutdown(wait=True)
- self.assertIs(
- grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
- self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_iterator.code())
+ def testUnrecognizedMethod(self):
+ request = b'abc'
- def testExpiredStreamRequestBlockingUnaryResponse(self):
- requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
- request_iterator = iter(requests)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ self._channel.unary_unary('NoSuchMethod')(request)
- multi_callable = _stream_unary_multi_callable(self._channel)
- with self._control.pause():
- with self.assertRaises(grpc.RpcError) as exception_context:
- multi_callable(
- request_iterator, timeout=test_constants.SHORT_TIMEOUT,
- metadata=(('test', 'ExpiredStreamRequestBlockingUnaryResponse'),))
-
- self.assertIsInstance(exception_context.exception, grpc.RpcError)
- self.assertIsInstance(exception_context.exception, grpc.Call)
- self.assertIsNotNone(exception_context.exception.initial_metadata())
- self.assertIs(
- grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
- self.assertIsNotNone(exception_context.exception.details())
- self.assertIsNotNone(exception_context.exception.trailing_metadata())
-
- def testExpiredStreamRequestFutureUnaryResponse(self):
- requests = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
- request_iterator = iter(requests)
- callback = _Callback()
-
- multi_callable = _stream_unary_multi_callable(self._channel)
- with self._control.pause():
- response_future = multi_callable.future(
- request_iterator, timeout=test_constants.SHORT_TIMEOUT,
- metadata=(('test', 'ExpiredStreamRequestFutureUnaryResponse'),))
- with self.assertRaises(grpc.FutureTimeoutError):
- response_future.result(timeout=test_constants.SHORT_TIMEOUT / 2.0)
- response_future.add_done_callback(callback)
- value_passed_to_callback = callback.value()
-
- with self.assertRaises(grpc.RpcError) as exception_context:
- response_future.result()
- self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
- self.assertIs(
- grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
- self.assertIsInstance(response_future.exception(), grpc.RpcError)
- self.assertIsNotNone(response_future.traceback())
- self.assertIs(response_future, value_passed_to_callback)
- self.assertIsNotNone(response_future.initial_metadata())
- self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
- self.assertIsNotNone(response_future.details())
- self.assertIsNotNone(response_future.trailing_metadata())
-
- def testExpiredStreamRequestStreamResponse(self):
- requests = tuple(b'\x67\x18' for _ in range(test_constants.STREAM_LENGTH))
- request_iterator = iter(requests)
-
- multi_callable = _stream_stream_multi_callable(self._channel)
- with self._control.pause():
- with self.assertRaises(grpc.RpcError) as exception_context:
- response_iterator = multi_callable(
- request_iterator, timeout=test_constants.SHORT_TIMEOUT,
- metadata=(('test', 'ExpiredStreamRequestStreamResponse'),))
- next(response_iterator)
+ self.assertEqual(grpc.StatusCode.UNIMPLEMENTED,
+ exception_context.exception.code())
+
+ def testSuccessfulUnaryRequestBlockingUnaryResponse(self):
+ request = b'\x07\x08'
+ expected_response = self._handler.handle_unary_unary(request, None)
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ response = multi_callable(
+ request,
+ metadata=(('test', 'SuccessfulUnaryRequestBlockingUnaryResponse'),))
+
+ self.assertEqual(expected_response, response)
- self.assertIs(
- grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
- self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_iterator.code())
+ def testSuccessfulUnaryRequestBlockingUnaryResponseWithCall(self):
+ request = b'\x07\x08'
+ expected_response = self._handler.handle_unary_unary(request, None)
- def testFailedUnaryRequestBlockingUnaryResponse(self):
- request = b'\x37\x17'
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ response, call = multi_callable.with_call(
+ request,
+ metadata=(('test',
+ 'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),))
+
+ self.assertEqual(expected_response, response)
+ self.assertIs(grpc.StatusCode.OK, call.code())
+
+ def testSuccessfulUnaryRequestFutureUnaryResponse(self):
+ request = b'\x07\x08'
+ expected_response = self._handler.handle_unary_unary(request, None)
- multi_callable = _unary_unary_multi_callable(self._channel)
- with self._control.fail():
- with self.assertRaises(grpc.RpcError) as exception_context:
- multi_callable.with_call(
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ response_future = multi_callable.future(
request,
- metadata=(('test', 'FailedUnaryRequestBlockingUnaryResponse'),))
-
- self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code())
-
- def testFailedUnaryRequestFutureUnaryResponse(self):
- request = b'\x37\x17'
- callback = _Callback()
-
- multi_callable = _unary_unary_multi_callable(self._channel)
- with self._control.fail():
- response_future = multi_callable.future(
- request,
- metadata=(('test', 'FailedUnaryRequestFutureUnaryResponse'),))
- response_future.add_done_callback(callback)
- value_passed_to_callback = callback.value()
-
- self.assertIsInstance(response_future, grpc.Future)
- self.assertIsInstance(response_future, grpc.Call)
- with self.assertRaises(grpc.RpcError) as exception_context:
- response_future.result()
- self.assertIs(
- grpc.StatusCode.UNKNOWN, exception_context.exception.code())
- self.assertIsInstance(response_future.exception(), grpc.RpcError)
- self.assertIsNotNone(response_future.traceback())
- self.assertIs(grpc.StatusCode.UNKNOWN, response_future.exception().code())
- self.assertIs(response_future, value_passed_to_callback)
-
- def testFailedUnaryRequestStreamResponse(self):
- request = b'\x37\x17'
-
- multi_callable = _unary_stream_multi_callable(self._channel)
- with self.assertRaises(grpc.RpcError) as exception_context:
- with self._control.fail():
+ metadata=(('test', 'SuccessfulUnaryRequestFutureUnaryResponse'),))
+ response = response_future.result()
+
+ self.assertIsInstance(response_future, grpc.Future)
+ self.assertIsInstance(response_future, grpc.Call)
+ self.assertEqual(expected_response, response)
+ self.assertIsNone(response_future.exception())
+ self.assertIsNone(response_future.traceback())
+
+ def testSuccessfulUnaryRequestStreamResponse(self):
+ request = b'\x37\x58'
+ expected_responses = tuple(
+ self._handler.handle_unary_stream(request, None))
+
+ multi_callable = _unary_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request,
- metadata=(('test', 'FailedUnaryRequestStreamResponse'),))
- next(response_iterator)
+ metadata=(('test', 'SuccessfulUnaryRequestStreamResponse'),))
+ responses = tuple(response_iterator)
- self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code())
+ self.assertSequenceEqual(expected_responses, responses)
- def testFailedStreamRequestBlockingUnaryResponse(self):
- requests = tuple(b'\x47\x58' for _ in range(test_constants.STREAM_LENGTH))
- request_iterator = iter(requests)
+ def testSuccessfulStreamRequestBlockingUnaryResponse(self):
+ requests = tuple(b'\x07\x08'
+ for _ in range(test_constants.STREAM_LENGTH))
+ expected_response = self._handler.handle_stream_unary(
+ iter(requests), None)
+ request_iterator = iter(requests)
- multi_callable = _stream_unary_multi_callable(self._channel)
- with self._control.fail():
- with self.assertRaises(grpc.RpcError) as exception_context:
- multi_callable(
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ response = multi_callable(
+ request_iterator,
+ metadata=(
+ ('test', 'SuccessfulStreamRequestBlockingUnaryResponse'),))
+
+ self.assertEqual(expected_response, response)
+
+ def testSuccessfulStreamRequestBlockingUnaryResponseWithCall(self):
+ requests = tuple(b'\x07\x08'
+ for _ in range(test_constants.STREAM_LENGTH))
+ expected_response = self._handler.handle_stream_unary(
+ iter(requests), None)
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ response, call = multi_callable.with_call(
+ request_iterator,
+ metadata=(
+ ('test',
+ 'SuccessfulStreamRequestBlockingUnaryResponseWithCall'),))
+
+ self.assertEqual(expected_response, response)
+ self.assertIs(grpc.StatusCode.OK, call.code())
+
+ def testSuccessfulStreamRequestFutureUnaryResponse(self):
+ requests = tuple(b'\x07\x08'
+ for _ in range(test_constants.STREAM_LENGTH))
+ expected_response = self._handler.handle_stream_unary(
+ iter(requests), None)
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ response_future = multi_callable.future(
request_iterator,
- metadata=(('test', 'FailedStreamRequestBlockingUnaryResponse'),))
-
- self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code())
-
- def testFailedStreamRequestFutureUnaryResponse(self):
- requests = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
- request_iterator = iter(requests)
- callback = _Callback()
-
- multi_callable = _stream_unary_multi_callable(self._channel)
- with self._control.fail():
- response_future = multi_callable.future(
- request_iterator,
- metadata=(('test', 'FailedStreamRequestFutureUnaryResponse'),))
- response_future.add_done_callback(callback)
- value_passed_to_callback = callback.value()
-
- with self.assertRaises(grpc.RpcError) as exception_context:
- response_future.result()
- self.assertIs(grpc.StatusCode.UNKNOWN, response_future.code())
- self.assertIs(
- grpc.StatusCode.UNKNOWN, exception_context.exception.code())
- self.assertIsInstance(response_future.exception(), grpc.RpcError)
- self.assertIsNotNone(response_future.traceback())
- self.assertIs(response_future, value_passed_to_callback)
-
- def testFailedStreamRequestStreamResponse(self):
- requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
- request_iterator = iter(requests)
-
- multi_callable = _stream_stream_multi_callable(self._channel)
- with self._control.fail():
- with self.assertRaises(grpc.RpcError) as exception_context:
+ metadata=(('test', 'SuccessfulStreamRequestFutureUnaryResponse'),))
+ response = response_future.result()
+
+ self.assertEqual(expected_response, response)
+ self.assertIsNone(response_future.exception())
+ self.assertIsNone(response_future.traceback())
+
+ def testSuccessfulStreamRequestStreamResponse(self):
+ requests = tuple(b'\x77\x58'
+ for _ in range(test_constants.STREAM_LENGTH))
+ expected_responses = tuple(
+ self._handler.handle_stream_stream(iter(requests), None))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request_iterator,
- metadata=(('test', 'FailedStreamRequestStreamResponse'),))
- tuple(response_iterator)
+ metadata=(('test', 'SuccessfulStreamRequestStreamResponse'),))
+ responses = tuple(response_iterator)
+
+ self.assertSequenceEqual(expected_responses, responses)
+
+ def testSequentialInvocations(self):
+ first_request = b'\x07\x08'
+ second_request = b'\x0809'
+ expected_first_response = self._handler.handle_unary_unary(
+ first_request, None)
+ expected_second_response = self._handler.handle_unary_unary(
+ second_request, None)
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ first_response = multi_callable(
+ first_request, metadata=(('test', 'SequentialInvocations'),))
+ second_response = multi_callable(
+ second_request, metadata=(('test', 'SequentialInvocations'),))
+
+ self.assertEqual(expected_first_response, first_response)
+ self.assertEqual(expected_second_response, second_response)
+
+ def testConcurrentBlockingInvocations(self):
+ pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ requests = tuple(b'\x07\x08'
+ for _ in range(test_constants.STREAM_LENGTH))
+ expected_response = self._handler.handle_stream_unary(
+ iter(requests), None)
+ expected_responses = [expected_response
+ ] * test_constants.THREAD_CONCURRENCY
+ response_futures = [None] * test_constants.THREAD_CONCURRENCY
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ for index in range(test_constants.THREAD_CONCURRENCY):
+ request_iterator = iter(requests)
+ response_future = pool.submit(
+ multi_callable,
+ request_iterator,
+ metadata=(('test', 'ConcurrentBlockingInvocations'),))
+ response_futures[index] = response_future
+ responses = tuple(response_future.result()
+ for response_future in response_futures)
+
+ pool.shutdown(wait=True)
+ self.assertSequenceEqual(expected_responses, responses)
+
+ def testConcurrentFutureInvocations(self):
+ requests = tuple(b'\x07\x08'
+ for _ in range(test_constants.STREAM_LENGTH))
+ expected_response = self._handler.handle_stream_unary(
+ iter(requests), None)
+ expected_responses = [expected_response
+ ] * test_constants.THREAD_CONCURRENCY
+ response_futures = [None] * test_constants.THREAD_CONCURRENCY
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ for index in range(test_constants.THREAD_CONCURRENCY):
+ request_iterator = iter(requests)
+ response_future = multi_callable.future(
+ request_iterator,
+ metadata=(('test', 'ConcurrentFutureInvocations'),))
+ response_futures[index] = response_future
+ responses = tuple(response_future.result()
+ for response_future in response_futures)
+
+ self.assertSequenceEqual(expected_responses, responses)
+
+ def testWaitingForSomeButNotAllConcurrentFutureInvocations(self):
+ pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ request = b'\x67\x68'
+ expected_response = self._handler.handle_unary_unary(request, None)
+ response_futures = [None] * test_constants.THREAD_CONCURRENCY
+ lock = threading.Lock()
+ test_is_running_cell = [True]
+
+ def wrap_future(future):
+
+ def wrap():
+ try:
+ return future.result()
+ except grpc.RpcError:
+ with lock:
+ if test_is_running_cell[0]:
+ raise
+ return None
+
+ return wrap
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ for index in range(test_constants.THREAD_CONCURRENCY):
+ inner_response_future = multi_callable.future(
+ request,
+ metadata=(
+ ('test',
+ 'WaitingForSomeButNotAllConcurrentFutureInvocations'),))
+ outer_response_future = pool.submit(
+ wrap_future(inner_response_future))
+ response_futures[index] = outer_response_future
+
+ some_completed_response_futures_iterator = itertools.islice(
+ futures.as_completed(response_futures),
+ test_constants.THREAD_CONCURRENCY // 2)
+ for response_future in some_completed_response_futures_iterator:
+ self.assertEqual(expected_response, response_future.result())
+ with lock:
+ test_is_running_cell[0] = False
+
+ def testConsumingOneStreamResponseUnaryRequest(self):
+ request = b'\x57\x38'
+
+ multi_callable = _unary_stream_multi_callable(self._channel)
+ response_iterator = multi_callable(
+ request,
+ metadata=(('test', 'ConsumingOneStreamResponseUnaryRequest'),))
+ next(response_iterator)
+
+ def testConsumingSomeButNotAllStreamResponsesUnaryRequest(self):
+ request = b'\x57\x38'
+
+ multi_callable = _unary_stream_multi_callable(self._channel)
+ response_iterator = multi_callable(
+ request,
+ metadata=(
+ ('test', 'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),))
+ for _ in range(test_constants.STREAM_LENGTH // 2):
+ next(response_iterator)
- self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code())
- self.assertIs(grpc.StatusCode.UNKNOWN, response_iterator.code())
+ def testConsumingSomeButNotAllStreamResponsesStreamRequest(self):
+ requests = tuple(b'\x67\x88'
+ for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
- def testIgnoredUnaryRequestFutureUnaryResponse(self):
- request = b'\x37\x17'
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ response_iterator = multi_callable(
+ request_iterator,
+ metadata=(('test',
+ 'ConsumingSomeButNotAllStreamResponsesStreamRequest'),))
+ for _ in range(test_constants.STREAM_LENGTH // 2):
+ next(response_iterator)
- multi_callable = _unary_unary_multi_callable(self._channel)
- multi_callable.future(
- request,
- metadata=(('test', 'IgnoredUnaryRequestFutureUnaryResponse'),))
+ def testConsumingTooManyStreamResponsesStreamRequest(self):
+ requests = tuple(b'\x67\x88'
+ for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
- def testIgnoredUnaryRequestStreamResponse(self):
- request = b'\x37\x17'
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ response_iterator = multi_callable(
+ request_iterator,
+ metadata=(
+ ('test', 'ConsumingTooManyStreamResponsesStreamRequest'),))
+ for _ in range(test_constants.STREAM_LENGTH):
+ next(response_iterator)
+ for _ in range(test_constants.STREAM_LENGTH):
+ with self.assertRaises(StopIteration):
+ next(response_iterator)
+
+ self.assertIsNotNone(response_iterator.initial_metadata())
+ self.assertIs(grpc.StatusCode.OK, response_iterator.code())
+ self.assertIsNotNone(response_iterator.details())
+ self.assertIsNotNone(response_iterator.trailing_metadata())
+
+ def testCancelledUnaryRequestUnaryResponse(self):
+ request = b'\x07\x17'
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ with self._control.pause():
+ response_future = multi_callable.future(
+ request,
+ metadata=(('test', 'CancelledUnaryRequestUnaryResponse'),))
+ response_future.cancel()
+
+ self.assertTrue(response_future.cancelled())
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.result()
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.exception()
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.traceback()
+ self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
+
+ def testCancelledUnaryRequestStreamResponse(self):
+ request = b'\x07\x19'
+
+ multi_callable = _unary_stream_multi_callable(self._channel)
+ with self._control.pause():
+ response_iterator = multi_callable(
+ request,
+ metadata=(('test', 'CancelledUnaryRequestStreamResponse'),))
+ self._control.block_until_paused()
+ response_iterator.cancel()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(response_iterator)
+ self.assertIs(grpc.StatusCode.CANCELLED,
+ exception_context.exception.code())
+ self.assertIsNotNone(response_iterator.initial_metadata())
+ self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
+ self.assertIsNotNone(response_iterator.details())
+ self.assertIsNotNone(response_iterator.trailing_metadata())
+
+ def testCancelledStreamRequestUnaryResponse(self):
+ requests = tuple(b'\x07\x08'
+ for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ with self._control.pause():
+ response_future = multi_callable.future(
+ request_iterator,
+ metadata=(('test', 'CancelledStreamRequestUnaryResponse'),))
+ self._control.block_until_paused()
+ response_future.cancel()
+
+ self.assertTrue(response_future.cancelled())
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.result()
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.exception()
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.traceback()
+ self.assertIsNotNone(response_future.initial_metadata())
+ self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
+ self.assertIsNotNone(response_future.details())
+ self.assertIsNotNone(response_future.trailing_metadata())
+
+ def testCancelledStreamRequestStreamResponse(self):
+ requests = tuple(b'\x07\x08'
+ for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ with self._control.pause():
+ response_iterator = multi_callable(
+ request_iterator,
+ metadata=(('test', 'CancelledStreamRequestStreamResponse'),))
+ response_iterator.cancel()
+
+ with self.assertRaises(grpc.RpcError):
+ next(response_iterator)
+ self.assertIsNotNone(response_iterator.initial_metadata())
+ self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
+ self.assertIsNotNone(response_iterator.details())
+ self.assertIsNotNone(response_iterator.trailing_metadata())
+
+ def testExpiredUnaryRequestBlockingUnaryResponse(self):
+ request = b'\x07\x17'
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ with self._control.pause():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ multi_callable.with_call(
+ request,
+ timeout=test_constants.SHORT_TIMEOUT,
+ metadata=(
+ ('test', 'ExpiredUnaryRequestBlockingUnaryResponse'),))
+
+ self.assertIsInstance(exception_context.exception, grpc.Call)
+ self.assertIsNotNone(exception_context.exception.initial_metadata())
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
+ exception_context.exception.code())
+ self.assertIsNotNone(exception_context.exception.details())
+ self.assertIsNotNone(exception_context.exception.trailing_metadata())
+
+ def testExpiredUnaryRequestFutureUnaryResponse(self):
+ request = b'\x07\x17'
+ callback = _Callback()
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ with self._control.pause():
+ response_future = multi_callable.future(
+ request,
+ timeout=test_constants.SHORT_TIMEOUT,
+ metadata=(('test', 'ExpiredUnaryRequestFutureUnaryResponse'),))
+ response_future.add_done_callback(callback)
+ value_passed_to_callback = callback.value()
+
+ self.assertIs(response_future, value_passed_to_callback)
+ self.assertIsNotNone(response_future.initial_metadata())
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
+ self.assertIsNotNone(response_future.details())
+ self.assertIsNotNone(response_future.trailing_metadata())
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
+ exception_context.exception.code())
+ self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIsNotNone(response_future.traceback())
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
+ response_future.exception().code())
+
+ def testExpiredUnaryRequestStreamResponse(self):
+ request = b'\x07\x19'
+
+ multi_callable = _unary_stream_multi_callable(self._channel)
+ with self._control.pause():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_iterator = multi_callable(
+ request,
+ timeout=test_constants.SHORT_TIMEOUT,
+ metadata=(('test', 'ExpiredUnaryRequestStreamResponse'),))
+ next(response_iterator)
+
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
+ exception_context.exception.code())
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
+ response_iterator.code())
+
+ def testExpiredStreamRequestBlockingUnaryResponse(self):
+ requests = tuple(b'\x07\x08'
+ for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ with self._control.pause():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ multi_callable(
+ request_iterator,
+ timeout=test_constants.SHORT_TIMEOUT,
+ metadata=(
+ ('test', 'ExpiredStreamRequestBlockingUnaryResponse'),))
+
+ self.assertIsInstance(exception_context.exception, grpc.RpcError)
+ self.assertIsInstance(exception_context.exception, grpc.Call)
+ self.assertIsNotNone(exception_context.exception.initial_metadata())
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
+ exception_context.exception.code())
+ self.assertIsNotNone(exception_context.exception.details())
+ self.assertIsNotNone(exception_context.exception.trailing_metadata())
+
+ def testExpiredStreamRequestFutureUnaryResponse(self):
+ requests = tuple(b'\x07\x18'
+ for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+ callback = _Callback()
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ with self._control.pause():
+ response_future = multi_callable.future(
+ request_iterator,
+ timeout=test_constants.SHORT_TIMEOUT,
+ metadata=(('test', 'ExpiredStreamRequestFutureUnaryResponse'),))
+ with self.assertRaises(grpc.FutureTimeoutError):
+ response_future.result(timeout=test_constants.SHORT_TIMEOUT /
+ 2.0)
+ response_future.add_done_callback(callback)
+ value_passed_to_callback = callback.value()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
+ exception_context.exception.code())
+ self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIsNotNone(response_future.traceback())
+ self.assertIs(response_future, value_passed_to_callback)
+ self.assertIsNotNone(response_future.initial_metadata())
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
+ self.assertIsNotNone(response_future.details())
+ self.assertIsNotNone(response_future.trailing_metadata())
+
+ def testExpiredStreamRequestStreamResponse(self):
+ requests = tuple(b'\x67\x18'
+ for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ with self._control.pause():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_iterator = multi_callable(
+ request_iterator,
+ timeout=test_constants.SHORT_TIMEOUT,
+ metadata=(('test', 'ExpiredStreamRequestStreamResponse'),))
+ next(response_iterator)
+
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
+ exception_context.exception.code())
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
+ response_iterator.code())
+
+ def testFailedUnaryRequestBlockingUnaryResponse(self):
+ request = b'\x37\x17'
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ with self._control.fail():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ multi_callable.with_call(
+ request,
+ metadata=(
+ ('test', 'FailedUnaryRequestBlockingUnaryResponse'),))
+
+ self.assertIs(grpc.StatusCode.UNKNOWN,
+ exception_context.exception.code())
+
+ def testFailedUnaryRequestFutureUnaryResponse(self):
+ request = b'\x37\x17'
+ callback = _Callback()
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ with self._control.fail():
+ response_future = multi_callable.future(
+ request,
+ metadata=(('test', 'FailedUnaryRequestFutureUnaryResponse'),))
+ response_future.add_done_callback(callback)
+ value_passed_to_callback = callback.value()
+
+ self.assertIsInstance(response_future, grpc.Future)
+ self.assertIsInstance(response_future, grpc.Call)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertIs(grpc.StatusCode.UNKNOWN,
+ exception_context.exception.code())
+ self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIsNotNone(response_future.traceback())
+ self.assertIs(grpc.StatusCode.UNKNOWN,
+ response_future.exception().code())
+ self.assertIs(response_future, value_passed_to_callback)
+
+ def testFailedUnaryRequestStreamResponse(self):
+ request = b'\x37\x17'
+
+ multi_callable = _unary_stream_multi_callable(self._channel)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ with self._control.fail():
+ response_iterator = multi_callable(
+ request,
+ metadata=(('test', 'FailedUnaryRequestStreamResponse'),))
+ next(response_iterator)
+
+ self.assertIs(grpc.StatusCode.UNKNOWN,
+ exception_context.exception.code())
+
+ def testFailedStreamRequestBlockingUnaryResponse(self):
+ requests = tuple(b'\x47\x58'
+ for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ with self._control.fail():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ multi_callable(
+ request_iterator,
+ metadata=(
+ ('test', 'FailedStreamRequestBlockingUnaryResponse'),))
+
+ self.assertIs(grpc.StatusCode.UNKNOWN,
+ exception_context.exception.code())
+
+ def testFailedStreamRequestFutureUnaryResponse(self):
+ requests = tuple(b'\x07\x18'
+ for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+ callback = _Callback()
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ with self._control.fail():
+ response_future = multi_callable.future(
+ request_iterator,
+ metadata=(('test', 'FailedStreamRequestFutureUnaryResponse'),))
+ response_future.add_done_callback(callback)
+ value_passed_to_callback = callback.value()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertIs(grpc.StatusCode.UNKNOWN, response_future.code())
+ self.assertIs(grpc.StatusCode.UNKNOWN,
+ exception_context.exception.code())
+ self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIsNotNone(response_future.traceback())
+ self.assertIs(response_future, value_passed_to_callback)
+
+ def testFailedStreamRequestStreamResponse(self):
+ requests = tuple(b'\x67\x88'
+ for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ with self._control.fail():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_iterator = multi_callable(
+ request_iterator,
+ metadata=(('test', 'FailedStreamRequestStreamResponse'),))
+ tuple(response_iterator)
+
+ self.assertIs(grpc.StatusCode.UNKNOWN,
+ exception_context.exception.code())
+ self.assertIs(grpc.StatusCode.UNKNOWN, response_iterator.code())
+
+ def testIgnoredUnaryRequestFutureUnaryResponse(self):
+ request = b'\x37\x17'
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ multi_callable.future(
+ request,
+ metadata=(('test', 'IgnoredUnaryRequestFutureUnaryResponse'),))
- multi_callable = _unary_stream_multi_callable(self._channel)
- multi_callable(
- request,
- metadata=(('test', 'IgnoredUnaryRequestStreamResponse'),))
+ def testIgnoredUnaryRequestStreamResponse(self):
+ request = b'\x37\x17'
- def testIgnoredStreamRequestFutureUnaryResponse(self):
- requests = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
- request_iterator = iter(requests)
+ multi_callable = _unary_stream_multi_callable(self._channel)
+ multi_callable(
+ request, metadata=(('test', 'IgnoredUnaryRequestStreamResponse'),))
+
+ def testIgnoredStreamRequestFutureUnaryResponse(self):
+ requests = tuple(b'\x07\x18'
+ for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
- multi_callable = _stream_unary_multi_callable(self._channel)
- multi_callable.future(
- request_iterator,
- metadata=(('test', 'IgnoredStreamRequestFutureUnaryResponse'),))
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ multi_callable.future(
+ request_iterator,
+ metadata=(('test', 'IgnoredStreamRequestFutureUnaryResponse'),))
- def testIgnoredStreamRequestStreamResponse(self):
- requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
- request_iterator = iter(requests)
+ def testIgnoredStreamRequestStreamResponse(self):
+ requests = tuple(b'\x67\x88'
+ for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
- multi_callable = _stream_stream_multi_callable(self._channel)
- multi_callable(
- request_iterator,
- metadata=(('test', 'IgnoredStreamRequestStreamResponse'),))
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ multi_callable(
+ request_iterator,
+ metadata=(('test', 'IgnoredStreamRequestStreamResponse'),))
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
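Note: the hunk above is a pure re-indent of _rpc_test.py (two-space to four-space bodies plus yapf-style argument wrapping); no test logic changes. For reference, a minimal self-contained sketch of the client-side pattern these tests exercise — blocking with_call and future invocation against an in-process insecure server. The _ECHO method name and the echo handler are hypothetical stand-ins for the tests' own constants and _GenericHandler:

    from concurrent import futures

    import grpc

    _ECHO = '/test/UnaryUnary'  # hypothetical method name for this sketch

    class _EchoHandler(grpc.GenericRpcHandler):
        # Minimal generic handler: echo the request bytes for any method.
        def service(self, handler_call_details):
            return grpc.unary_unary_rpc_method_handler(
                lambda request, context: request)

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
    port = server.add_insecure_port('[::]:0')
    server.add_generic_rpc_handlers((_EchoHandler(),))
    server.start()

    channel = grpc.insecure_channel('localhost:%d' % port)
    multi_callable = channel.unary_unary(_ECHO)

    # Blocking form returning the call object, as in the *WithCall tests.
    response, call = multi_callable.with_call(
        b'\x07\x08', metadata=(('test', 'sketch'),))
    assert response == b'\x07\x08'
    assert call.code() is grpc.StatusCode.OK

    # Future form, as in the *FutureUnaryResponse tests.
    response_future = multi_callable.future(b'\x07\x08')
    assert response_future.result() == b'\x07\x08'
    assert response_future.exception() is None

    server.stop(None)
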
diff --git a/src/python/grpcio_tests/tests/unit/_sanity/__init__.py b/src/python/grpcio_tests/tests/unit/_sanity/__init__.py
index 2f88fa0412..100a624dc9 100644
--- a/src/python/grpcio_tests/tests/unit/_sanity/__init__.py
+++ b/src/python/grpcio_tests/tests/unit/_sanity/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/_sanity/_sanity_test.py b/src/python/grpcio_tests/tests/unit/_sanity/_sanity_test.py
index e9fdf217ae..0fbe6a2b5d 100644
--- a/src/python/grpcio_tests/tests/unit/_sanity/_sanity_test.py
+++ b/src/python/grpcio_tests/tests/unit/_sanity/_sanity_test.py
@@ -38,21 +38,23 @@ import tests
class Sanity(unittest.TestCase):
- def testTestsJsonUpToDate(self):
- """Autodiscovers all test suites and checks that tests.json is up to date"""
- loader = tests.Loader()
- loader.loadTestsFromNames(['tests'])
- test_suite_names = [
- test_case_class.id().rsplit('.', 1)[0]
- for test_case_class in tests._loader.iterate_suite_cases(loader.suite)]
- test_suite_names = sorted(set(test_suite_names))
-
- tests_json_string = pkg_resources.resource_string('tests', 'tests.json')
- if six.PY3:
- tests_json_string = tests_json_string.decode()
- tests_json = json.loads(tests_json_string)
- self.assertListEqual(test_suite_names, tests_json)
+ def testTestsJsonUpToDate(self):
+ """Autodiscovers all test suites and checks that tests.json is up to date"""
+ loader = tests.Loader()
+ loader.loadTestsFromNames(['tests'])
+ test_suite_names = [
+ test_case_class.id().rsplit('.', 1)[0]
+ for test_case_class in tests._loader.iterate_suite_cases(
+ loader.suite)
+ ]
+ test_suite_names = sorted(set(test_suite_names))
+
+ tests_json_string = pkg_resources.resource_string('tests', 'tests.json')
+ if six.PY3:
+ tests_json_string = tests_json_string.decode()
+ tests_json = json.loads(tests_json_string)
+ self.assertListEqual(test_suite_names, tests_json)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
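Note: after the re-indent the sanity check is unchanged: autodiscover every suite under tests, then compare the sorted names against tests.json. When the assertion fires, the same discovery logic can be reused to regenerate the file; a hypothetical maintenance sketch (the output path is assumed, not taken from this diff):

    import json

    import tests

    # Reuse the discovery logic the sanity test itself uses.
    loader = tests.Loader()
    loader.loadTestsFromNames(['tests'])
    test_suite_names = sorted(set(
        test_case.id().rsplit('.', 1)[0]
        for test_case in tests._loader.iterate_suite_cases(loader.suite)))

    # Assumed repository location of the resource read via pkg_resources.
    with open('src/python/grpcio_tests/tests/tests.json', 'w') as tests_json_file:
        json.dump(test_suite_names, tests_json_file, indent=2)
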
diff --git a/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py b/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py
index 3e4f317edc..be3522f46f 100644
--- a/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py
+++ b/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py
@@ -40,78 +40,89 @@ _EPSILON = 0.1
def cleanup(timeout):
- if timeout is not None:
- time.sleep(timeout)
- else:
- time.sleep(_LONG_TIME)
+ if timeout is not None:
+ time.sleep(timeout)
+ else:
+ time.sleep(_LONG_TIME)
def slow_cleanup(timeout):
- # Don't respect timeout
- time.sleep(_LONG_TIME)
+ # Don't respect timeout
+ time.sleep(_LONG_TIME)
class CleanupThreadTest(unittest.TestCase):
- def testTargetInvocation(self):
- event = threading.Event()
- def target(arg1, arg2, arg3=None):
- self.assertEqual('arg1', arg1)
- self.assertEqual('arg2', arg2)
- self.assertEqual('arg3', arg3)
- event.set()
-
- cleanup_thread = _common.CleanupThread(behavior=lambda x: None,
- target=target, name='test-name',
- args=('arg1', 'arg2'), kwargs={'arg3': 'arg3'})
- cleanup_thread.start()
- cleanup_thread.join()
- self.assertEqual(cleanup_thread.name, 'test-name')
- self.assertTrue(event.is_set())
-
- def testJoinNoTimeout(self):
- cleanup_thread = _common.CleanupThread(behavior=cleanup)
- cleanup_thread.start()
- start_time = time.time()
- cleanup_thread.join()
- end_time = time.time()
- self.assertAlmostEqual(_LONG_TIME, end_time - start_time, delta=_EPSILON)
-
- def testJoinTimeout(self):
- cleanup_thread = _common.CleanupThread(behavior=cleanup)
- cleanup_thread.start()
- start_time = time.time()
- cleanup_thread.join(_SHORT_TIME)
- end_time = time.time()
- self.assertAlmostEqual(_SHORT_TIME, end_time - start_time, delta=_EPSILON)
-
- def testJoinTimeoutSlowBehavior(self):
- cleanup_thread = _common.CleanupThread(behavior=slow_cleanup)
- cleanup_thread.start()
- start_time = time.time()
- cleanup_thread.join(_SHORT_TIME)
- end_time = time.time()
- self.assertAlmostEqual(_LONG_TIME, end_time - start_time, delta=_EPSILON)
-
- def testJoinTimeoutSlowTarget(self):
- event = threading.Event()
- def target():
- event.wait(_LONG_TIME)
- cleanup_thread = _common.CleanupThread(behavior=cleanup, target=target)
- cleanup_thread.start()
- start_time = time.time()
- cleanup_thread.join(_SHORT_TIME)
- end_time = time.time()
- self.assertAlmostEqual(_SHORT_TIME, end_time - start_time, delta=_EPSILON)
- event.set()
-
- def testJoinZeroTimeout(self):
- cleanup_thread = _common.CleanupThread(behavior=cleanup)
- cleanup_thread.start()
- start_time = time.time()
- cleanup_thread.join(0)
- end_time = time.time()
- self.assertAlmostEqual(0, end_time - start_time, delta=_EPSILON)
+ def testTargetInvocation(self):
+ event = threading.Event()
+
+ def target(arg1, arg2, arg3=None):
+ self.assertEqual('arg1', arg1)
+ self.assertEqual('arg2', arg2)
+ self.assertEqual('arg3', arg3)
+ event.set()
+
+ cleanup_thread = _common.CleanupThread(
+ behavior=lambda x: None,
+ target=target,
+ name='test-name',
+ args=('arg1', 'arg2'),
+ kwargs={'arg3': 'arg3'})
+ cleanup_thread.start()
+ cleanup_thread.join()
+ self.assertEqual(cleanup_thread.name, 'test-name')
+ self.assertTrue(event.is_set())
+
+ def testJoinNoTimeout(self):
+ cleanup_thread = _common.CleanupThread(behavior=cleanup)
+ cleanup_thread.start()
+ start_time = time.time()
+ cleanup_thread.join()
+ end_time = time.time()
+ self.assertAlmostEqual(
+ _LONG_TIME, end_time - start_time, delta=_EPSILON)
+
+ def testJoinTimeout(self):
+ cleanup_thread = _common.CleanupThread(behavior=cleanup)
+ cleanup_thread.start()
+ start_time = time.time()
+ cleanup_thread.join(_SHORT_TIME)
+ end_time = time.time()
+ self.assertAlmostEqual(
+ _SHORT_TIME, end_time - start_time, delta=_EPSILON)
+
+ def testJoinTimeoutSlowBehavior(self):
+ cleanup_thread = _common.CleanupThread(behavior=slow_cleanup)
+ cleanup_thread.start()
+ start_time = time.time()
+ cleanup_thread.join(_SHORT_TIME)
+ end_time = time.time()
+ self.assertAlmostEqual(
+ _LONG_TIME, end_time - start_time, delta=_EPSILON)
+
+ def testJoinTimeoutSlowTarget(self):
+ event = threading.Event()
+
+ def target():
+ event.wait(_LONG_TIME)
+
+ cleanup_thread = _common.CleanupThread(behavior=cleanup, target=target)
+ cleanup_thread.start()
+ start_time = time.time()
+ cleanup_thread.join(_SHORT_TIME)
+ end_time = time.time()
+ self.assertAlmostEqual(
+ _SHORT_TIME, end_time - start_time, delta=_EPSILON)
+ event.set()
+
+ def testJoinZeroTimeout(self):
+ cleanup_thread = _common.CleanupThread(behavior=cleanup)
+ cleanup_thread.start()
+ start_time = time.time()
+ cleanup_thread.join(0)
+ end_time = time.time()
+ self.assertAlmostEqual(0, end_time - start_time, delta=_EPSILON)
+
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
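The tests above exercise grpc._common.CleanupThread, whose join(timeout) is expected to bound the combined running time of the target and the cleanup behavior. A minimal usage sketch under that assumption (grpc._common is an internal module, so the import and the timing numbers here are illustrative only, not public API):

import threading
import time

from grpc import _common  # internal helper exercised by the tests above

def behavior(timeout):
    # Cooperative cleanup: sleep only as long as the remaining budget allows.
    time.sleep(timeout if timeout is not None else 5.0)

release = threading.Event()
thread = _common.CleanupThread(behavior=behavior, target=release.wait, args=(5.0,))
thread.start()
start = time.time()
thread.join(0.5)  # caps target-plus-cleanup at roughly half a second
print('joined after %.2fs' % (time.time() - start))
release.set()  # let the slow target finish so the process can exit promptly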
diff --git a/src/python/grpcio_tests/tests/unit/_thread_pool.py b/src/python/grpcio_tests/tests/unit/_thread_pool.py
index f13cc2f86f..fad2e1c8f6 100644
--- a/src/python/grpcio_tests/tests/unit/_thread_pool.py
+++ b/src/python/grpcio_tests/tests/unit/_thread_pool.py
@@ -32,17 +32,18 @@ from concurrent import futures
class RecordingThreadPool(futures.Executor):
- """A thread pool that records if used."""
- def __init__(self, max_workers):
- self._tp_executor = futures.ThreadPoolExecutor(max_workers=max_workers)
- self._lock = threading.Lock()
- self._was_used = False
+ """A thread pool that records if used."""
- def submit(self, fn, *args, **kwargs):
- with self._lock:
- self._was_used = True
- self._tp_executor.submit(fn, *args, **kwargs)
+ def __init__(self, max_workers):
+ self._tp_executor = futures.ThreadPoolExecutor(max_workers=max_workers)
+ self._lock = threading.Lock()
+ self._was_used = False
- def was_used(self):
- with self._lock:
- return self._was_used
+ def submit(self, fn, *args, **kwargs):
+ with self._lock:
+ self._was_used = True
+ self._tp_executor.submit(fn, *args, **kwargs)
+
+ def was_used(self):
+ with self._lock:
+ return self._was_used
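RecordingThreadPool above wraps a real ThreadPoolExecutor and merely flips a flag on submit, so a test can hand it to code that optionally schedules work and then assert whether the pool was touched. A minimal sketch of that pattern, assuming the helper is importable as tests.unit._thread_pool (the path of the file in this diff):

from tests.unit import _thread_pool

pool = _thread_pool.RecordingThreadPool(max_workers=1)
assert not pool.was_used()

pool.submit(print, 'scheduled through the recording pool')
assert pool.was_used()  # the flag is set synchronously, before the task runs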
diff --git a/src/python/grpcio_tests/tests/unit/beta/__init__.py b/src/python/grpcio_tests/tests/unit/beta/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/unit/beta/__init__.py
+++ b/src/python/grpcio_tests/tests/unit/beta/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py b/src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py
index 3a9701b8eb..b5fdac26c1 100644
--- a/src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py
+++ b/src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests Face interface compliance of the gRPC Python Beta API."""
import threading
@@ -57,290 +56,303 @@ _RESPONSE = b'123'
class _Servicer(object):
- def __init__(self):
- self._condition = threading.Condition()
- self._peer = None
- self._serviced = False
-
- def unary_unary(self, request, context):
- with self._condition:
- self._request = request
- self._peer = context.protocol_context().peer()
- self._invocation_metadata = context.invocation_metadata()
- context.protocol_context().disable_next_response_compression()
- self._serviced = True
- self._condition.notify_all()
- return _RESPONSE
-
- def unary_stream(self, request, context):
- with self._condition:
- self._request = request
- self._peer = context.protocol_context().peer()
- self._invocation_metadata = context.invocation_metadata()
- context.protocol_context().disable_next_response_compression()
- self._serviced = True
- self._condition.notify_all()
- return
- yield
-
- def stream_unary(self, request_iterator, context):
- for request in request_iterator:
- self._request = request
- with self._condition:
- self._peer = context.protocol_context().peer()
- self._invocation_metadata = context.invocation_metadata()
- context.protocol_context().disable_next_response_compression()
- self._serviced = True
- self._condition.notify_all()
- return _RESPONSE
-
- def stream_stream(self, request_iterator, context):
- for request in request_iterator:
- with self._condition:
- self._peer = context.protocol_context().peer()
- context.protocol_context().disable_next_response_compression()
- yield _RESPONSE
- with self._condition:
- self._invocation_metadata = context.invocation_metadata()
- self._serviced = True
- self._condition.notify_all()
-
- def peer(self):
- with self._condition:
- return self._peer
-
- def block_until_serviced(self):
- with self._condition:
- while not self._serviced:
- self._condition.wait()
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._peer = None
+ self._serviced = False
+
+ def unary_unary(self, request, context):
+ with self._condition:
+ self._request = request
+ self._peer = context.protocol_context().peer()
+ self._invocation_metadata = context.invocation_metadata()
+ context.protocol_context().disable_next_response_compression()
+ self._serviced = True
+ self._condition.notify_all()
+ return _RESPONSE
+
+ def unary_stream(self, request, context):
+ with self._condition:
+ self._request = request
+ self._peer = context.protocol_context().peer()
+ self._invocation_metadata = context.invocation_metadata()
+ context.protocol_context().disable_next_response_compression()
+ self._serviced = True
+ self._condition.notify_all()
+ return
+ yield
+
+ def stream_unary(self, request_iterator, context):
+ for request in request_iterator:
+ self._request = request
+ with self._condition:
+ self._peer = context.protocol_context().peer()
+ self._invocation_metadata = context.invocation_metadata()
+ context.protocol_context().disable_next_response_compression()
+ self._serviced = True
+ self._condition.notify_all()
+ return _RESPONSE
+
+ def stream_stream(self, request_iterator, context):
+ for request in request_iterator:
+ with self._condition:
+ self._peer = context.protocol_context().peer()
+ context.protocol_context().disable_next_response_compression()
+ yield _RESPONSE
+ with self._condition:
+ self._invocation_metadata = context.invocation_metadata()
+ self._serviced = True
+ self._condition.notify_all()
+
+ def peer(self):
+ with self._condition:
+ return self._peer
+
+ def block_until_serviced(self):
+ with self._condition:
+ while not self._serviced:
+ self._condition.wait()
class _BlockingIterator(object):
- def __init__(self, upstream):
- self._condition = threading.Condition()
- self._upstream = upstream
- self._allowed = []
+ def __init__(self, upstream):
+ self._condition = threading.Condition()
+ self._upstream = upstream
+ self._allowed = []
- def __iter__(self):
- return self
+ def __iter__(self):
+ return self
- def __next__(self):
- return self.next()
+ def __next__(self):
+ return self.next()
- def next(self):
- with self._condition:
- while True:
- if self._allowed is None:
- raise StopIteration()
- elif self._allowed:
- return self._allowed.pop(0)
- else:
- self._condition.wait()
+ def next(self):
+ with self._condition:
+ while True:
+ if self._allowed is None:
+ raise StopIteration()
+ elif self._allowed:
+ return self._allowed.pop(0)
+ else:
+ self._condition.wait()
- def allow(self):
- with self._condition:
- try:
- self._allowed.append(next(self._upstream))
- except StopIteration:
- self._allowed = None
- self._condition.notify_all()
+ def allow(self):
+ with self._condition:
+ try:
+ self._allowed.append(next(self._upstream))
+ except StopIteration:
+ self._allowed = None
+ self._condition.notify_all()
def _metadata_plugin(context, callback):
- callback([(_PER_RPC_CREDENTIALS_METADATA_KEY,
- _PER_RPC_CREDENTIALS_METADATA_VALUE)], None)
+ callback([(_PER_RPC_CREDENTIALS_METADATA_KEY,
+ _PER_RPC_CREDENTIALS_METADATA_VALUE)], None)
class BetaFeaturesTest(unittest.TestCase):
- def setUp(self):
- self._servicer = _Servicer()
- method_implementations = {
- (_GROUP, _UNARY_UNARY):
+ def setUp(self):
+ self._servicer = _Servicer()
+ method_implementations = {
+ (_GROUP, _UNARY_UNARY):
utilities.unary_unary_inline(self._servicer.unary_unary),
- (_GROUP, _UNARY_STREAM):
+ (_GROUP, _UNARY_STREAM):
utilities.unary_stream_inline(self._servicer.unary_stream),
- (_GROUP, _STREAM_UNARY):
+ (_GROUP, _STREAM_UNARY):
utilities.stream_unary_inline(self._servicer.stream_unary),
- (_GROUP, _STREAM_STREAM):
+ (_GROUP, _STREAM_STREAM):
utilities.stream_stream_inline(self._servicer.stream_stream),
- }
-
- cardinalities = {
- _UNARY_UNARY: cardinality.Cardinality.UNARY_UNARY,
- _UNARY_STREAM: cardinality.Cardinality.UNARY_STREAM,
- _STREAM_UNARY: cardinality.Cardinality.STREAM_UNARY,
- _STREAM_STREAM: cardinality.Cardinality.STREAM_STREAM,
- }
-
- server_options = implementations.server_options(
- thread_pool_size=test_constants.POOL_SIZE)
- self._server = implementations.server(
- method_implementations, options=server_options)
- server_credentials = implementations.ssl_server_credentials(
- [(resources.private_key(), resources.certificate_chain(),),])
- port = self._server.add_secure_port('[::]:0', server_credentials)
- self._server.start()
- self._channel_credentials = implementations.ssl_channel_credentials(
- resources.test_root_certificates())
- self._call_credentials = implementations.metadata_call_credentials(
- _metadata_plugin)
- channel = test_utilities.not_really_secure_channel(
- 'localhost', port, self._channel_credentials, _SERVER_HOST_OVERRIDE)
- stub_options = implementations.stub_options(
- thread_pool_size=test_constants.POOL_SIZE)
- self._dynamic_stub = implementations.dynamic_stub(
- channel, _GROUP, cardinalities, options=stub_options)
-
- def tearDown(self):
- self._dynamic_stub = None
- self._server.stop(test_constants.SHORT_TIMEOUT).wait()
-
- def test_unary_unary(self):
- call_options = interfaces.grpc_call_options(
- disable_compression=True, credentials=self._call_credentials)
- response = getattr(self._dynamic_stub, _UNARY_UNARY)(
- _REQUEST, test_constants.LONG_TIMEOUT, protocol_options=call_options)
- self.assertEqual(_RESPONSE, response)
- self.assertIsNotNone(self._servicer.peer())
- invocation_metadata = [(metadatum.key, metadatum.value) for metadatum in
- self._servicer._invocation_metadata]
- self.assertIn(
- (_PER_RPC_CREDENTIALS_METADATA_KEY,
- _PER_RPC_CREDENTIALS_METADATA_VALUE),
- invocation_metadata)
-
- def test_unary_stream(self):
- call_options = interfaces.grpc_call_options(
- disable_compression=True, credentials=self._call_credentials)
- response_iterator = getattr(self._dynamic_stub, _UNARY_STREAM)(
- _REQUEST, test_constants.LONG_TIMEOUT, protocol_options=call_options)
- self._servicer.block_until_serviced()
- self.assertIsNotNone(self._servicer.peer())
- invocation_metadata = [(metadatum.key, metadatum.value) for metadatum in
- self._servicer._invocation_metadata]
- self.assertIn(
- (_PER_RPC_CREDENTIALS_METADATA_KEY,
- _PER_RPC_CREDENTIALS_METADATA_VALUE),
- invocation_metadata)
-
- def test_stream_unary(self):
- call_options = interfaces.grpc_call_options(
- credentials=self._call_credentials)
- request_iterator = _BlockingIterator(iter((_REQUEST,)))
- response_future = getattr(self._dynamic_stub, _STREAM_UNARY).future(
- request_iterator, test_constants.LONG_TIMEOUT,
- protocol_options=call_options)
- response_future.protocol_context().disable_next_request_compression()
- request_iterator.allow()
- response_future.protocol_context().disable_next_request_compression()
- request_iterator.allow()
- self._servicer.block_until_serviced()
- self.assertIsNotNone(self._servicer.peer())
- self.assertEqual(_RESPONSE, response_future.result())
- invocation_metadata = [(metadatum.key, metadatum.value) for metadatum in
- self._servicer._invocation_metadata]
- self.assertIn(
- (_PER_RPC_CREDENTIALS_METADATA_KEY,
- _PER_RPC_CREDENTIALS_METADATA_VALUE),
- invocation_metadata)
-
- def test_stream_stream(self):
- call_options = interfaces.grpc_call_options(
- credentials=self._call_credentials)
- request_iterator = _BlockingIterator(iter((_REQUEST,)))
- response_iterator = getattr(self._dynamic_stub, _STREAM_STREAM)(
- request_iterator, test_constants.SHORT_TIMEOUT,
- protocol_options=call_options)
- response_iterator.protocol_context().disable_next_request_compression()
- request_iterator.allow()
- response = next(response_iterator)
- response_iterator.protocol_context().disable_next_request_compression()
- request_iterator.allow()
- self._servicer.block_until_serviced()
- self.assertIsNotNone(self._servicer.peer())
- self.assertEqual(_RESPONSE, response)
- invocation_metadata = [(metadatum.key, metadatum.value) for metadatum in
- self._servicer._invocation_metadata]
- self.assertIn(
- (_PER_RPC_CREDENTIALS_METADATA_KEY,
- _PER_RPC_CREDENTIALS_METADATA_VALUE),
- invocation_metadata)
+ }
+
+ cardinalities = {
+ _UNARY_UNARY: cardinality.Cardinality.UNARY_UNARY,
+ _UNARY_STREAM: cardinality.Cardinality.UNARY_STREAM,
+ _STREAM_UNARY: cardinality.Cardinality.STREAM_UNARY,
+ _STREAM_STREAM: cardinality.Cardinality.STREAM_STREAM,
+ }
+
+ server_options = implementations.server_options(
+ thread_pool_size=test_constants.POOL_SIZE)
+ self._server = implementations.server(
+ method_implementations, options=server_options)
+ server_credentials = implementations.ssl_server_credentials([(
+ resources.private_key(),
+ resources.certificate_chain(),),])
+ port = self._server.add_secure_port('[::]:0', server_credentials)
+ self._server.start()
+ self._channel_credentials = implementations.ssl_channel_credentials(
+ resources.test_root_certificates())
+ self._call_credentials = implementations.metadata_call_credentials(
+ _metadata_plugin)
+ channel = test_utilities.not_really_secure_channel(
+ 'localhost', port, self._channel_credentials, _SERVER_HOST_OVERRIDE)
+ stub_options = implementations.stub_options(
+ thread_pool_size=test_constants.POOL_SIZE)
+ self._dynamic_stub = implementations.dynamic_stub(
+ channel, _GROUP, cardinalities, options=stub_options)
+
+ def tearDown(self):
+ self._dynamic_stub = None
+ self._server.stop(test_constants.SHORT_TIMEOUT).wait()
+
+ def test_unary_unary(self):
+ call_options = interfaces.grpc_call_options(
+ disable_compression=True, credentials=self._call_credentials)
+ response = getattr(self._dynamic_stub, _UNARY_UNARY)(
+ _REQUEST,
+ test_constants.LONG_TIMEOUT,
+ protocol_options=call_options)
+ self.assertEqual(_RESPONSE, response)
+ self.assertIsNotNone(self._servicer.peer())
+ invocation_metadata = [
+ (metadatum.key, metadatum.value)
+ for metadatum in self._servicer._invocation_metadata
+ ]
+ self.assertIn((_PER_RPC_CREDENTIALS_METADATA_KEY,
+ _PER_RPC_CREDENTIALS_METADATA_VALUE),
+ invocation_metadata)
+
+ def test_unary_stream(self):
+ call_options = interfaces.grpc_call_options(
+ disable_compression=True, credentials=self._call_credentials)
+ response_iterator = getattr(self._dynamic_stub, _UNARY_STREAM)(
+ _REQUEST,
+ test_constants.LONG_TIMEOUT,
+ protocol_options=call_options)
+ self._servicer.block_until_serviced()
+ self.assertIsNotNone(self._servicer.peer())
+ invocation_metadata = [
+ (metadatum.key, metadatum.value)
+ for metadatum in self._servicer._invocation_metadata
+ ]
+ self.assertIn((_PER_RPC_CREDENTIALS_METADATA_KEY,
+ _PER_RPC_CREDENTIALS_METADATA_VALUE),
+ invocation_metadata)
+
+ def test_stream_unary(self):
+ call_options = interfaces.grpc_call_options(
+ credentials=self._call_credentials)
+ request_iterator = _BlockingIterator(iter((_REQUEST,)))
+ response_future = getattr(self._dynamic_stub, _STREAM_UNARY).future(
+ request_iterator,
+ test_constants.LONG_TIMEOUT,
+ protocol_options=call_options)
+ response_future.protocol_context().disable_next_request_compression()
+ request_iterator.allow()
+ response_future.protocol_context().disable_next_request_compression()
+ request_iterator.allow()
+ self._servicer.block_until_serviced()
+ self.assertIsNotNone(self._servicer.peer())
+ self.assertEqual(_RESPONSE, response_future.result())
+ invocation_metadata = [
+ (metadatum.key, metadatum.value)
+ for metadatum in self._servicer._invocation_metadata
+ ]
+ self.assertIn((_PER_RPC_CREDENTIALS_METADATA_KEY,
+ _PER_RPC_CREDENTIALS_METADATA_VALUE),
+ invocation_metadata)
+
+ def test_stream_stream(self):
+ call_options = interfaces.grpc_call_options(
+ credentials=self._call_credentials)
+ request_iterator = _BlockingIterator(iter((_REQUEST,)))
+ response_iterator = getattr(self._dynamic_stub, _STREAM_STREAM)(
+ request_iterator,
+ test_constants.SHORT_TIMEOUT,
+ protocol_options=call_options)
+ response_iterator.protocol_context().disable_next_request_compression()
+ request_iterator.allow()
+ response = next(response_iterator)
+ response_iterator.protocol_context().disable_next_request_compression()
+ request_iterator.allow()
+ self._servicer.block_until_serviced()
+ self.assertIsNotNone(self._servicer.peer())
+ self.assertEqual(_RESPONSE, response)
+ invocation_metadata = [
+ (metadatum.key, metadatum.value)
+ for metadatum in self._servicer._invocation_metadata
+ ]
+ self.assertIn((_PER_RPC_CREDENTIALS_METADATA_KEY,
+ _PER_RPC_CREDENTIALS_METADATA_VALUE),
+ invocation_metadata)
class ContextManagementAndLifecycleTest(unittest.TestCase):
- def setUp(self):
- self._servicer = _Servicer()
- self._method_implementations = {
- (_GROUP, _UNARY_UNARY):
+ def setUp(self):
+ self._servicer = _Servicer()
+ self._method_implementations = {
+ (_GROUP, _UNARY_UNARY):
utilities.unary_unary_inline(self._servicer.unary_unary),
- (_GROUP, _UNARY_STREAM):
+ (_GROUP, _UNARY_STREAM):
utilities.unary_stream_inline(self._servicer.unary_stream),
- (_GROUP, _STREAM_UNARY):
+ (_GROUP, _STREAM_UNARY):
utilities.stream_unary_inline(self._servicer.stream_unary),
- (_GROUP, _STREAM_STREAM):
+ (_GROUP, _STREAM_STREAM):
utilities.stream_stream_inline(self._servicer.stream_stream),
- }
-
- self._cardinalities = {
- _UNARY_UNARY: cardinality.Cardinality.UNARY_UNARY,
- _UNARY_STREAM: cardinality.Cardinality.UNARY_STREAM,
- _STREAM_UNARY: cardinality.Cardinality.STREAM_UNARY,
- _STREAM_STREAM: cardinality.Cardinality.STREAM_STREAM,
- }
-
- self._server_options = implementations.server_options(
- thread_pool_size=test_constants.POOL_SIZE)
- self._server_credentials = implementations.ssl_server_credentials(
- [(resources.private_key(), resources.certificate_chain(),),])
- self._channel_credentials = implementations.ssl_channel_credentials(
- resources.test_root_certificates())
- self._stub_options = implementations.stub_options(
- thread_pool_size=test_constants.POOL_SIZE)
-
- def test_stub_context(self):
- server = implementations.server(
- self._method_implementations, options=self._server_options)
- port = server.add_secure_port('[::]:0', self._server_credentials)
- server.start()
-
- channel = test_utilities.not_really_secure_channel(
- 'localhost', port, self._channel_credentials, _SERVER_HOST_OVERRIDE)
- dynamic_stub = implementations.dynamic_stub(
- channel, _GROUP, self._cardinalities, options=self._stub_options)
- for _ in range(100):
- with dynamic_stub:
- pass
- for _ in range(10):
- with dynamic_stub:
- call_options = interfaces.grpc_call_options(
- disable_compression=True)
- response = getattr(dynamic_stub, _UNARY_UNARY)(
- _REQUEST, test_constants.LONG_TIMEOUT,
- protocol_options=call_options)
- self.assertEqual(_RESPONSE, response)
- self.assertIsNotNone(self._servicer.peer())
-
- server.stop(test_constants.SHORT_TIMEOUT).wait()
-
- def test_server_lifecycle(self):
- for _ in range(100):
- server = implementations.server(
- self._method_implementations, options=self._server_options)
- port = server.add_secure_port('[::]:0', self._server_credentials)
- server.start()
- server.stop(test_constants.SHORT_TIMEOUT).wait()
- for _ in range(100):
- server = implementations.server(
- self._method_implementations, options=self._server_options)
- server.add_secure_port('[::]:0', self._server_credentials)
- server.add_insecure_port('[::]:0')
- with server:
- server.stop(test_constants.SHORT_TIMEOUT)
- server.stop(test_constants.SHORT_TIMEOUT)
+ }
+
+ self._cardinalities = {
+ _UNARY_UNARY: cardinality.Cardinality.UNARY_UNARY,
+ _UNARY_STREAM: cardinality.Cardinality.UNARY_STREAM,
+ _STREAM_UNARY: cardinality.Cardinality.STREAM_UNARY,
+ _STREAM_STREAM: cardinality.Cardinality.STREAM_STREAM,
+ }
+
+ self._server_options = implementations.server_options(
+ thread_pool_size=test_constants.POOL_SIZE)
+ self._server_credentials = implementations.ssl_server_credentials([(
+ resources.private_key(),
+ resources.certificate_chain(),),])
+ self._channel_credentials = implementations.ssl_channel_credentials(
+ resources.test_root_certificates())
+ self._stub_options = implementations.stub_options(
+ thread_pool_size=test_constants.POOL_SIZE)
+
+ def test_stub_context(self):
+ server = implementations.server(
+ self._method_implementations, options=self._server_options)
+ port = server.add_secure_port('[::]:0', self._server_credentials)
+ server.start()
+
+ channel = test_utilities.not_really_secure_channel(
+ 'localhost', port, self._channel_credentials, _SERVER_HOST_OVERRIDE)
+ dynamic_stub = implementations.dynamic_stub(
+ channel, _GROUP, self._cardinalities, options=self._stub_options)
+ for _ in range(100):
+ with dynamic_stub:
+ pass
+ for _ in range(10):
+ with dynamic_stub:
+ call_options = interfaces.grpc_call_options(
+ disable_compression=True)
+ response = getattr(dynamic_stub, _UNARY_UNARY)(
+ _REQUEST,
+ test_constants.LONG_TIMEOUT,
+ protocol_options=call_options)
+ self.assertEqual(_RESPONSE, response)
+ self.assertIsNotNone(self._servicer.peer())
+
+ server.stop(test_constants.SHORT_TIMEOUT).wait()
+
+ def test_server_lifecycle(self):
+ for _ in range(100):
+ server = implementations.server(
+ self._method_implementations, options=self._server_options)
+ port = server.add_secure_port('[::]:0', self._server_credentials)
+ server.start()
+ server.stop(test_constants.SHORT_TIMEOUT).wait()
+ for _ in range(100):
+ server = implementations.server(
+ self._method_implementations, options=self._server_options)
+ server.add_secure_port('[::]:0', self._server_credentials)
+ server.add_insecure_port('[::]:0')
+ with server:
+ server.stop(test_constants.SHORT_TIMEOUT)
+ server.stop(test_constants.SHORT_TIMEOUT)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py b/src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py
index 5d826a269d..49d683b8a6 100644
--- a/src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py
+++ b/src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests of grpc.beta._connectivity_channel."""
import unittest
@@ -36,13 +35,13 @@ from grpc.beta import interfaces
class ConnectivityStatesTest(unittest.TestCase):
- def testBetaConnectivityStates(self):
- self.assertIsNotNone(interfaces.ChannelConnectivity.IDLE)
- self.assertIsNotNone(interfaces.ChannelConnectivity.CONNECTING)
- self.assertIsNotNone(interfaces.ChannelConnectivity.READY)
- self.assertIsNotNone(interfaces.ChannelConnectivity.TRANSIENT_FAILURE)
- self.assertIsNotNone(interfaces.ChannelConnectivity.FATAL_FAILURE)
+ def testBetaConnectivityStates(self):
+ self.assertIsNotNone(interfaces.ChannelConnectivity.IDLE)
+ self.assertIsNotNone(interfaces.ChannelConnectivity.CONNECTING)
+ self.assertIsNotNone(interfaces.ChannelConnectivity.READY)
+ self.assertIsNotNone(interfaces.ChannelConnectivity.TRANSIENT_FAILURE)
+ self.assertIsNotNone(interfaces.ChannelConnectivity.FATAL_FAILURE)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py b/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py
index 3a67516906..f421442624 100644
--- a/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py
+++ b/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests Face interface compliance of the gRPC Python Beta API."""
import collections
@@ -47,94 +46,97 @@ _SERVER_HOST_OVERRIDE = 'foo.test.google.fr'
class _SerializationBehaviors(
- collections.namedtuple(
- '_SerializationBehaviors',
- ('request_serializers', 'request_deserializers', 'response_serializers',
- 'response_deserializers',))):
- pass
+ collections.namedtuple('_SerializationBehaviors', (
+ 'request_serializers',
+ 'request_deserializers',
+ 'response_serializers',
+ 'response_deserializers',))):
+ pass
def _serialization_behaviors_from_test_methods(test_methods):
- request_serializers = {}
- request_deserializers = {}
- response_serializers = {}
- response_deserializers = {}
- for (group, method), test_method in six.iteritems(test_methods):
- request_serializers[group, method] = test_method.serialize_request
- request_deserializers[group, method] = test_method.deserialize_request
- response_serializers[group, method] = test_method.serialize_response
- response_deserializers[group, method] = test_method.deserialize_response
- return _SerializationBehaviors(
- request_serializers, request_deserializers, response_serializers,
- response_deserializers)
+ request_serializers = {}
+ request_deserializers = {}
+ response_serializers = {}
+ response_deserializers = {}
+ for (group, method), test_method in six.iteritems(test_methods):
+ request_serializers[group, method] = test_method.serialize_request
+ request_deserializers[group, method] = test_method.deserialize_request
+ response_serializers[group, method] = test_method.serialize_response
+ response_deserializers[group, method] = test_method.deserialize_response
+ return _SerializationBehaviors(request_serializers, request_deserializers,
+ response_serializers, response_deserializers)
class _Implementation(test_interfaces.Implementation):
- def instantiate(
- self, methods, method_implementations, multi_method_implementation):
- serialization_behaviors = _serialization_behaviors_from_test_methods(
- methods)
- # TODO(nathaniel): Add a "groups" attribute to _digest.TestServiceDigest.
- service = next(iter(methods))[0]
- # TODO(nathaniel): Add a "cardinalities_by_group" attribute to
- # _digest.TestServiceDigest.
- cardinalities = {
- method: method_object.cardinality()
- for (group, method), method_object in six.iteritems(methods)}
-
- server_options = implementations.server_options(
- request_deserializers=serialization_behaviors.request_deserializers,
- response_serializers=serialization_behaviors.response_serializers,
- thread_pool_size=test_constants.POOL_SIZE)
- server = implementations.server(
- method_implementations, options=server_options)
- server_credentials = implementations.ssl_server_credentials(
- [(resources.private_key(), resources.certificate_chain(),),])
- port = server.add_secure_port('[::]:0', server_credentials)
- server.start()
- channel_credentials = implementations.ssl_channel_credentials(
- resources.test_root_certificates())
- channel = test_utilities.not_really_secure_channel(
- 'localhost', port, channel_credentials, _SERVER_HOST_OVERRIDE)
- stub_options = implementations.stub_options(
- request_serializers=serialization_behaviors.request_serializers,
- response_deserializers=serialization_behaviors.response_deserializers,
- thread_pool_size=test_constants.POOL_SIZE)
- generic_stub = implementations.generic_stub(channel, options=stub_options)
- dynamic_stub = implementations.dynamic_stub(
- channel, service, cardinalities, options=stub_options)
- return generic_stub, {service: dynamic_stub}, server
-
- def destantiate(self, memo):
- memo.stop(test_constants.SHORT_TIMEOUT).wait()
-
- def invocation_metadata(self):
- return grpc_test_common.INVOCATION_INITIAL_METADATA
-
- def initial_metadata(self):
- return grpc_test_common.SERVICE_INITIAL_METADATA
-
- def terminal_metadata(self):
- return grpc_test_common.SERVICE_TERMINAL_METADATA
-
- def code(self):
- return interfaces.StatusCode.OK
-
- def details(self):
- return grpc_test_common.DETAILS
-
- def metadata_transmitted(self, original_metadata, transmitted_metadata):
- return original_metadata is None or grpc_test_common.metadata_transmitted(
- original_metadata, transmitted_metadata)
+ def instantiate(self, methods, method_implementations,
+ multi_method_implementation):
+ serialization_behaviors = _serialization_behaviors_from_test_methods(
+ methods)
+ # TODO(nathaniel): Add a "groups" attribute to _digest.TestServiceDigest.
+ service = next(iter(methods))[0]
+ # TODO(nathaniel): Add a "cardinalities_by_group" attribute to
+ # _digest.TestServiceDigest.
+ cardinalities = {
+ method: method_object.cardinality()
+ for (group, method), method_object in six.iteritems(methods)
+ }
+
+ server_options = implementations.server_options(
+ request_deserializers=serialization_behaviors.request_deserializers,
+ response_serializers=serialization_behaviors.response_serializers,
+ thread_pool_size=test_constants.POOL_SIZE)
+ server = implementations.server(
+ method_implementations, options=server_options)
+ server_credentials = implementations.ssl_server_credentials([(
+ resources.private_key(),
+ resources.certificate_chain(),),])
+ port = server.add_secure_port('[::]:0', server_credentials)
+ server.start()
+ channel_credentials = implementations.ssl_channel_credentials(
+ resources.test_root_certificates())
+ channel = test_utilities.not_really_secure_channel(
+ 'localhost', port, channel_credentials, _SERVER_HOST_OVERRIDE)
+ stub_options = implementations.stub_options(
+ request_serializers=serialization_behaviors.request_serializers,
+ response_deserializers=serialization_behaviors.
+ response_deserializers,
+ thread_pool_size=test_constants.POOL_SIZE)
+ generic_stub = implementations.generic_stub(
+ channel, options=stub_options)
+ dynamic_stub = implementations.dynamic_stub(
+ channel, service, cardinalities, options=stub_options)
+ return generic_stub, {service: dynamic_stub}, server
+
+ def destantiate(self, memo):
+ memo.stop(test_constants.SHORT_TIMEOUT).wait()
+
+ def invocation_metadata(self):
+ return grpc_test_common.INVOCATION_INITIAL_METADATA
+
+ def initial_metadata(self):
+ return grpc_test_common.SERVICE_INITIAL_METADATA
+
+ def terminal_metadata(self):
+ return grpc_test_common.SERVICE_TERMINAL_METADATA
+
+ def code(self):
+ return interfaces.StatusCode.OK
+
+ def details(self):
+ return grpc_test_common.DETAILS
+
+ def metadata_transmitted(self, original_metadata, transmitted_metadata):
+ return original_metadata is None or grpc_test_common.metadata_transmitted(
+ original_metadata, transmitted_metadata)
def load_tests(loader, tests, pattern):
- return unittest.TestSuite(
- tests=tuple(
- loader.loadTestsFromTestCase(test_case_class)
- for test_case_class in test_cases.test_cases(_Implementation())))
+ return unittest.TestSuite(tests=tuple(
+ loader.loadTestsFromTestCase(test_case_class)
+ for test_case_class in test_cases.test_cases(_Implementation())))
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/beta/_implementations_test.py b/src/python/grpcio_tests/tests/unit/beta/_implementations_test.py
index 127f93e9bb..69bb5cc2a5 100644
--- a/src/python/grpcio_tests/tests/unit/beta/_implementations_test.py
+++ b/src/python/grpcio_tests/tests/unit/beta/_implementations_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests the implementations module of the gRPC Python Beta API."""
import datetime
@@ -40,31 +39,32 @@ from tests.unit import resources
class ChannelCredentialsTest(unittest.TestCase):
- def test_runtime_provided_root_certificates(self):
- channel_credentials = implementations.ssl_channel_credentials()
- self.assertIsInstance(
- channel_credentials, implementations.ChannelCredentials)
-
- def test_application_provided_root_certificates(self):
- channel_credentials = implementations.ssl_channel_credentials(
- resources.test_root_certificates())
- self.assertIsInstance(
- channel_credentials, implementations.ChannelCredentials)
+ def test_runtime_provided_root_certificates(self):
+ channel_credentials = implementations.ssl_channel_credentials()
+ self.assertIsInstance(channel_credentials,
+ implementations.ChannelCredentials)
+
+ def test_application_provided_root_certificates(self):
+ channel_credentials = implementations.ssl_channel_credentials(
+ resources.test_root_certificates())
+ self.assertIsInstance(channel_credentials,
+ implementations.ChannelCredentials)
class CallCredentialsTest(unittest.TestCase):
- def test_google_call_credentials(self):
- creds = oauth2client_client.GoogleCredentials(
- 'token', 'client_id', 'secret', 'refresh_token',
- datetime.datetime(2008, 6, 24), 'https://refresh.uri.com/',
- 'user_agent')
- call_creds = implementations.google_call_credentials(creds)
- self.assertIsInstance(call_creds, implementations.CallCredentials)
+ def test_google_call_credentials(self):
+ creds = oauth2client_client.GoogleCredentials(
+ 'token', 'client_id', 'secret', 'refresh_token',
+ datetime.datetime(2008, 6, 24), 'https://refresh.uri.com/',
+ 'user_agent')
+ call_creds = implementations.google_call_credentials(creds)
+ self.assertIsInstance(call_creds, implementations.CallCredentials)
+
+ def test_access_token_call_credentials(self):
+ call_creds = implementations.access_token_call_credentials('token')
+ self.assertIsInstance(call_creds, implementations.CallCredentials)
- def test_access_token_call_credentials(self):
- call_creds = implementations.access_token_call_credentials('token')
- self.assertIsInstance(call_creds, implementations.CallCredentials)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/beta/_not_found_test.py b/src/python/grpcio_tests/tests/unit/beta/_not_found_test.py
index 37b8c49120..ce7b91e9fe 100644
--- a/src/python/grpcio_tests/tests/unit/beta/_not_found_test.py
+++ b/src/python/grpcio_tests/tests/unit/beta/_not_found_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests of RPC-method-not-found behavior."""
import unittest
@@ -39,37 +38,38 @@ from tests.unit.framework.common import test_constants
class NotFoundTest(unittest.TestCase):
- def setUp(self):
- self._server = implementations.server({})
- port = self._server.add_insecure_port('[::]:0')
- channel = implementations.insecure_channel('localhost', port)
- self._generic_stub = implementations.generic_stub(channel)
- self._server.start()
+ def setUp(self):
+ self._server = implementations.server({})
+ port = self._server.add_insecure_port('[::]:0')
+ channel = implementations.insecure_channel('localhost', port)
+ self._generic_stub = implementations.generic_stub(channel)
+ self._server.start()
- def tearDown(self):
- self._server.stop(0).wait()
- self._generic_stub = None
+ def tearDown(self):
+ self._server.stop(0).wait()
+ self._generic_stub = None
- def test_blocking_unary_unary_not_found(self):
- with self.assertRaises(face.LocalError) as exception_assertion_context:
- self._generic_stub.blocking_unary_unary(
- 'groop', 'meffod', b'abc', test_constants.LONG_TIMEOUT,
- with_call=True)
- self.assertIs(
- exception_assertion_context.exception.code,
- interfaces.StatusCode.UNIMPLEMENTED)
+ def test_blocking_unary_unary_not_found(self):
+ with self.assertRaises(face.LocalError) as exception_assertion_context:
+ self._generic_stub.blocking_unary_unary(
+ 'groop',
+ 'meffod',
+ b'abc',
+ test_constants.LONG_TIMEOUT,
+ with_call=True)
+ self.assertIs(exception_assertion_context.exception.code,
+ interfaces.StatusCode.UNIMPLEMENTED)
- def test_future_stream_unary_not_found(self):
- rpc_future = self._generic_stub.future_stream_unary(
- 'grupe', 'mevvod', [b'def'], test_constants.LONG_TIMEOUT)
- with self.assertRaises(face.LocalError) as exception_assertion_context:
- rpc_future.result()
- self.assertIs(
- exception_assertion_context.exception.code,
- interfaces.StatusCode.UNIMPLEMENTED)
- self.assertIs(
- rpc_future.exception().code, interfaces.StatusCode.UNIMPLEMENTED)
+ def test_future_stream_unary_not_found(self):
+ rpc_future = self._generic_stub.future_stream_unary(
+ 'grupe', 'mevvod', iter([b'def']), test_constants.LONG_TIMEOUT)
+ with self.assertRaises(face.LocalError) as exception_assertion_context:
+ rpc_future.result()
+ self.assertIs(exception_assertion_context.exception.code,
+ interfaces.StatusCode.UNIMPLEMENTED)
+ self.assertIs(rpc_future.exception().code,
+ interfaces.StatusCode.UNIMPLEMENTED)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py b/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py
index 9cce96cc85..e8e62c322a 100644
--- a/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py
+++ b/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests of grpc.beta.utilities."""
import threading
@@ -41,68 +40,68 @@ from tests.unit.framework.common import test_constants
class _Callback(object):
- def __init__(self):
- self._condition = threading.Condition()
- self._value = None
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._value = None
- def accept_value(self, value):
- with self._condition:
- self._value = value
- self._condition.notify_all()
+ def accept_value(self, value):
+ with self._condition:
+ self._value = value
+ self._condition.notify_all()
- def block_until_called(self):
- with self._condition:
- while self._value is None:
- self._condition.wait()
- return self._value
+ def block_until_called(self):
+ with self._condition:
+ while self._value is None:
+ self._condition.wait()
+ return self._value
class ChannelConnectivityTest(unittest.TestCase):
- def test_lonely_channel_connectivity(self):
- channel = implementations.insecure_channel('localhost', 12345)
- callback = _Callback()
-
- ready_future = utilities.channel_ready_future(channel)
- ready_future.add_done_callback(callback.accept_value)
- with self.assertRaises(future.TimeoutError):
- ready_future.result(timeout=test_constants.SHORT_TIMEOUT)
- self.assertFalse(ready_future.cancelled())
- self.assertFalse(ready_future.done())
- self.assertTrue(ready_future.running())
- ready_future.cancel()
- value_passed_to_callback = callback.block_until_called()
- self.assertIs(ready_future, value_passed_to_callback)
- self.assertTrue(ready_future.cancelled())
- self.assertTrue(ready_future.done())
- self.assertFalse(ready_future.running())
-
- def test_immediately_connectable_channel_connectivity(self):
- server = implementations.server({})
- port = server.add_insecure_port('[::]:0')
- server.start()
- channel = implementations.insecure_channel('localhost', port)
- callback = _Callback()
-
- try:
- ready_future = utilities.channel_ready_future(channel)
- ready_future.add_done_callback(callback.accept_value)
- self.assertIsNone(
- ready_future.result(timeout=test_constants.LONG_TIMEOUT))
- value_passed_to_callback = callback.block_until_called()
- self.assertIs(ready_future, value_passed_to_callback)
- self.assertFalse(ready_future.cancelled())
- self.assertTrue(ready_future.done())
- self.assertFalse(ready_future.running())
- # Cancellation after maturity has no effect.
- ready_future.cancel()
- self.assertFalse(ready_future.cancelled())
- self.assertTrue(ready_future.done())
- self.assertFalse(ready_future.running())
- finally:
- ready_future.cancel()
- server.stop(0)
+ def test_lonely_channel_connectivity(self):
+ channel = implementations.insecure_channel('localhost', 12345)
+ callback = _Callback()
+
+ ready_future = utilities.channel_ready_future(channel)
+ ready_future.add_done_callback(callback.accept_value)
+ with self.assertRaises(future.TimeoutError):
+ ready_future.result(timeout=test_constants.SHORT_TIMEOUT)
+ self.assertFalse(ready_future.cancelled())
+ self.assertFalse(ready_future.done())
+ self.assertTrue(ready_future.running())
+ ready_future.cancel()
+ value_passed_to_callback = callback.block_until_called()
+ self.assertIs(ready_future, value_passed_to_callback)
+ self.assertTrue(ready_future.cancelled())
+ self.assertTrue(ready_future.done())
+ self.assertFalse(ready_future.running())
+
+ def test_immediately_connectable_channel_connectivity(self):
+ server = implementations.server({})
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ channel = implementations.insecure_channel('localhost', port)
+ callback = _Callback()
+
+ try:
+ ready_future = utilities.channel_ready_future(channel)
+ ready_future.add_done_callback(callback.accept_value)
+ self.assertIsNone(
+ ready_future.result(timeout=test_constants.LONG_TIMEOUT))
+ value_passed_to_callback = callback.block_until_called()
+ self.assertIs(ready_future, value_passed_to_callback)
+ self.assertFalse(ready_future.cancelled())
+ self.assertTrue(ready_future.done())
+ self.assertFalse(ready_future.running())
+ # Cancellation after maturity has no effect.
+ ready_future.cancel()
+ self.assertFalse(ready_future.cancelled())
+ self.assertTrue(ready_future.done())
+ self.assertFalse(ready_future.running())
+ finally:
+ ready_future.cancel()
+ server.stop(0)
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/beta/test_utilities.py b/src/python/grpcio_tests/tests/unit/beta/test_utilities.py
index 692da9c97d..f542420683 100644
--- a/src/python/grpcio_tests/tests/unit/beta/test_utilities.py
+++ b/src/python/grpcio_tests/tests/unit/beta/test_utilities.py
@@ -26,16 +26,15 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Test-appropriate entry points into the gRPC Python Beta API."""
import grpc
from grpc.beta import implementations
-def not_really_secure_channel(
- host, port, channel_credentials, server_host_override):
- """Creates an insecure Channel to a remote host.
+def not_really_secure_channel(host, port, channel_credentials,
+ server_host_override):
+ """Creates an insecure Channel to a remote host.
Args:
host: The name of the remote host to which to connect.
@@ -48,8 +47,8 @@ def not_really_secure_channel(
An implementations.Channel to the remote host through which RPCs may be
conducted.
"""
- target = '%s:%d' % (host, port)
- channel = grpc.secure_channel(
- target, channel_credentials,
- (('grpc.ssl_target_name_override', server_host_override,),))
- return implementations.Channel(channel)
+ target = '%s:%d' % (host, port)
+ channel = grpc.secure_channel(target, channel_credentials, ((
+ 'grpc.ssl_target_name_override',
+ server_host_override,),))
+ return implementations.Channel(channel)
diff --git a/src/python/grpcio_tests/tests/unit/framework/__init__.py b/src/python/grpcio_tests/tests/unit/framework/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/unit/framework/__init__.py
+++ b/src/python/grpcio_tests/tests/unit/framework/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/framework/common/__init__.py b/src/python/grpcio_tests/tests/unit/framework/common/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/unit/framework/common/__init__.py
+++ b/src/python/grpcio_tests/tests/unit/framework/common/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/framework/common/test_constants.py b/src/python/grpcio_tests/tests/unit/framework/common/test_constants.py
index b6682d396c..905483c08d 100644
--- a/src/python/grpcio_tests/tests/unit/framework/common/test_constants.py
+++ b/src/python/grpcio_tests/tests/unit/framework/common/test_constants.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Constants shared among tests throughout RPC Framework."""
# Value for maximum duration in seconds that a test is allowed for its actual
diff --git a/src/python/grpcio_tests/tests/unit/framework/common/test_control.py b/src/python/grpcio_tests/tests/unit/framework/common/test_control.py
index 088e2f8b88..af08731b1e 100644
--- a/src/python/grpcio_tests/tests/unit/framework/common/test_control.py
+++ b/src/python/grpcio_tests/tests/unit/framework/common/test_control.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Code for instructing systems under test to block or fail."""
import abc
@@ -37,7 +36,7 @@ import six
class Defect(Exception):
- """Simulates a programming defect raised into in a system under test.
+ """Simulates a programming defect raised into in a system under test.
Use of a standard exception type is too easily misconstrued as an actual
defect in either the test infrastructure or the system under test.
@@ -45,7 +44,7 @@ class Defect(Exception):
class Control(six.with_metaclass(abc.ABCMeta)):
- """An object that accepts program control from a system under test.
+ """An object that accepts program control from a system under test.
Systems under test passed a Control should call its control() method
frequently during execution. The control() method may block, raise an
@@ -53,61 +52,61 @@ class Control(six.with_metaclass(abc.ABCMeta)):
the system under test to simulate hanging, failing, or functioning.
"""
- @abc.abstractmethod
- def control(self):
- """Potentially does anything."""
- raise NotImplementedError()
+ @abc.abstractmethod
+ def control(self):
+ """Potentially does anything."""
+ raise NotImplementedError()
class PauseFailControl(Control):
- """A Control that can be used to pause or fail code under control.
+ """A Control that can be used to pause or fail code under control.
This object is only safe for use from two threads: one of the system under
test calling control and the other from the test system calling pause,
block_until_paused, and fail.
"""
- def __init__(self):
- self._condition = threading.Condition()
- self._pause = False
- self._paused = False
- self._fail = False
-
- def control(self):
- with self._condition:
- if self._fail:
- raise Defect()
-
- while self._pause:
- self._paused = True
- self._condition.notify_all()
- self._condition.wait()
- self._paused = False
-
- @contextlib.contextmanager
- def pause(self):
- """Pauses code under control while controlling code is in context."""
- with self._condition:
- self._pause = True
- yield
- with self._condition:
- self._pause = False
- self._condition.notify_all()
-
- def block_until_paused(self):
- """Blocks controlling code until code under control is paused.
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._pause = False
+ self._paused = False
+ self._fail = False
+
+ def control(self):
+ with self._condition:
+ if self._fail:
+ raise Defect()
+
+ while self._pause:
+ self._paused = True
+ self._condition.notify_all()
+ self._condition.wait()
+ self._paused = False
+
+ @contextlib.contextmanager
+ def pause(self):
+ """Pauses code under control while controlling code is in context."""
+ with self._condition:
+ self._pause = True
+ yield
+ with self._condition:
+ self._pause = False
+ self._condition.notify_all()
+
+ def block_until_paused(self):
+ """Blocks controlling code until code under control is paused.
May only be called within the context of a pause call.
"""
- with self._condition:
- while not self._paused:
- self._condition.wait()
-
- @contextlib.contextmanager
- def fail(self):
- """Fails code under control while controlling code is in context."""
- with self._condition:
- self._fail = True
- yield
- with self._condition:
- self._fail = False
+ with self._condition:
+ while not self._paused:
+ self._condition.wait()
+
+ @contextlib.contextmanager
+ def fail(self):
+ """Fails code under control while controlling code is in context."""
+ with self._condition:
+ self._fail = True
+ yield
+ with self._condition:
+ self._fail = False
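PauseFailControl coordinates exactly two threads: a system-under-test thread calling control() and a test thread driving pause(), block_until_paused(), and fail(). A minimal sketch of that handshake, assuming the module is importable as tests.unit.framework.common.test_control:

import threading

from tests.unit.framework.common import test_control

control = test_control.PauseFailControl()

def worker():
    control.control()  # parks here while the test holds the pause context

thread = threading.Thread(target=worker)
with control.pause():
    thread.start()
    control.block_until_paused()  # the worker is now blocked inside control()
thread.join()  # leaving the pause context releases the worker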
diff --git a/src/python/grpcio_tests/tests/unit/framework/common/test_coverage.py b/src/python/grpcio_tests/tests/unit/framework/common/test_coverage.py
index ea2d2812ce..13ceec31a0 100644
--- a/src/python/grpcio_tests/tests/unit/framework/common/test_coverage.py
+++ b/src/python/grpcio_tests/tests/unit/framework/common/test_coverage.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Governs coverage for tests of RPCs throughout RPC Framework."""
import abc
@@ -38,80 +37,80 @@ import six
class Coverage(six.with_metaclass(abc.ABCMeta)):
- """Specification of test coverage."""
+ """Specification of test coverage."""
- @abc.abstractmethod
- def testSuccessfulUnaryRequestUnaryResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testSuccessfulUnaryRequestUnaryResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testSuccessfulUnaryRequestStreamResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testSuccessfulUnaryRequestStreamResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testSuccessfulStreamRequestUnaryResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testSuccessfulStreamRequestUnaryResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testSuccessfulStreamRequestStreamResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testSuccessfulStreamRequestStreamResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testSequentialInvocations(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testSequentialInvocations(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testParallelInvocations(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testParallelInvocations(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testWaitingForSomeButNotAllParallelInvocations(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testWaitingForSomeButNotAllParallelInvocations(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testCancelledUnaryRequestUnaryResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testCancelledUnaryRequestUnaryResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testCancelledUnaryRequestStreamResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testCancelledUnaryRequestStreamResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testCancelledStreamRequestUnaryResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testCancelledStreamRequestUnaryResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testCancelledStreamRequestStreamResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testCancelledStreamRequestStreamResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testExpiredUnaryRequestUnaryResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testExpiredUnaryRequestUnaryResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testExpiredUnaryRequestStreamResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testExpiredUnaryRequestStreamResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testExpiredStreamRequestUnaryResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testExpiredStreamRequestUnaryResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testExpiredStreamRequestStreamResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testExpiredStreamRequestStreamResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testFailedUnaryRequestUnaryResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testFailedUnaryRequestUnaryResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testFailedUnaryRequestStreamResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testFailedUnaryRequestStreamResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testFailedStreamRequestUnaryResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testFailedStreamRequestUnaryResponse(self):
+ raise NotImplementedError()
- @abc.abstractmethod
- def testFailedStreamRequestStreamResponse(self):
- raise NotImplementedError()
+ @abc.abstractmethod
+ def testFailedStreamRequestStreamResponse(self):
+ raise NotImplementedError()
diff --git a/src/python/grpcio_tests/tests/unit/framework/foundation/__init__.py b/src/python/grpcio_tests/tests/unit/framework/foundation/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/unit/framework/foundation/__init__.py
+++ b/src/python/grpcio_tests/tests/unit/framework/foundation/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/framework/foundation/_logging_pool_test.py b/src/python/grpcio_tests/tests/unit/framework/foundation/_logging_pool_test.py
index 330e445d43..19e8cbdd8e 100644
--- a/src/python/grpcio_tests/tests/unit/framework/foundation/_logging_pool_test.py
+++ b/src/python/grpcio_tests/tests/unit/framework/foundation/_logging_pool_test.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tests for grpc.framework.foundation.logging_pool."""
import threading
@@ -39,50 +38,51 @@ _POOL_SIZE = 16
class _CallableObject(object):
- def __init__(self):
- self._lock = threading.Lock()
- self._passed_values = []
+ def __init__(self):
+ self._lock = threading.Lock()
+ self._passed_values = []
- def __call__(self, value):
- with self._lock:
- self._passed_values.append(value)
+ def __call__(self, value):
+ with self._lock:
+ self._passed_values.append(value)
- def passed_values(self):
- with self._lock:
- return tuple(self._passed_values)
+ def passed_values(self):
+ with self._lock:
+ return tuple(self._passed_values)
class LoggingPoolTest(unittest.TestCase):
- def testUpAndDown(self):
- pool = logging_pool.pool(_POOL_SIZE)
- pool.shutdown(wait=True)
+ def testUpAndDown(self):
+ pool = logging_pool.pool(_POOL_SIZE)
+ pool.shutdown(wait=True)
- with logging_pool.pool(_POOL_SIZE) as pool:
- self.assertIsNotNone(pool)
+ with logging_pool.pool(_POOL_SIZE) as pool:
+ self.assertIsNotNone(pool)
- def testTaskExecuted(self):
- test_list = []
+ def testTaskExecuted(self):
+ test_list = []
- with logging_pool.pool(_POOL_SIZE) as pool:
- pool.submit(lambda: test_list.append(object())).result()
+ with logging_pool.pool(_POOL_SIZE) as pool:
+ pool.submit(lambda: test_list.append(object())).result()
- self.assertTrue(test_list)
+ self.assertTrue(test_list)
- def testException(self):
- with logging_pool.pool(_POOL_SIZE) as pool:
- raised_exception = pool.submit(lambda: 1/0).exception()
+ def testException(self):
+ with logging_pool.pool(_POOL_SIZE) as pool:
+ raised_exception = pool.submit(lambda: 1 / 0).exception()
- self.assertIsNotNone(raised_exception)
+ self.assertIsNotNone(raised_exception)
- def testCallableObjectExecuted(self):
- callable_object = _CallableObject()
- passed_object = object()
- with logging_pool.pool(_POOL_SIZE) as pool:
- future = pool.submit(callable_object, passed_object)
- self.assertIsNone(future.result())
- self.assertSequenceEqual((passed_object,), callable_object.passed_values())
+ def testCallableObjectExecuted(self):
+ callable_object = _CallableObject()
+ passed_object = object()
+ with logging_pool.pool(_POOL_SIZE) as pool:
+ future = pool.submit(callable_object, passed_object)
+ self.assertIsNone(future.result())
+ self.assertSequenceEqual((passed_object,),
+ callable_object.passed_values())
if __name__ == '__main__':
- unittest.main(verbosity=2)
+ unittest.main(verbosity=2)
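The tests above drive logging_pool both through an explicit shutdown and as a context manager. A minimal sketch of that usage outside the unittest harness (the pool size here is arbitrary; the tests use their own _POOL_SIZE constant):

    from grpc.framework.foundation import logging_pool

    # Entering the context creates the pool; leaving it shuts the pool down,
    # mirroring pool.shutdown(wait=True) in testUpAndDown above.
    with logging_pool.pool(16) as pool:
        # submit() returns a future; result() blocks until the callable has run.
        assert pool.submit(lambda: 2 + 2).result() == 4

        # Exceptions raised inside submitted callables surface via exception().
        assert isinstance(pool.submit(lambda: 1 / 0).exception(), ZeroDivisionError)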
diff --git a/src/python/grpcio_tests/tests/unit/framework/foundation/stream_testing.py b/src/python/grpcio_tests/tests/unit/framework/foundation/stream_testing.py
index 098a53d5e7..2929e4dd78 100644
--- a/src/python/grpcio_tests/tests/unit/framework/foundation/stream_testing.py
+++ b/src/python/grpcio_tests/tests/unit/framework/foundation/stream_testing.py
@@ -26,48 +26,47 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Utilities for testing stream-related code."""
from grpc.framework.foundation import stream
class TestConsumer(stream.Consumer):
- """A stream.Consumer instrumented for testing.
+ """A stream.Consumer instrumented for testing.
Attributes:
calls: A sequence of value-termination pairs describing the history of calls
made on this object.
"""
- def __init__(self):
- self.calls = []
+ def __init__(self):
+ self.calls = []
- def consume(self, value):
- """See stream.Consumer.consume for specification."""
- self.calls.append((value, False))
+ def consume(self, value):
+ """See stream.Consumer.consume for specification."""
+ self.calls.append((value, False))
- def terminate(self):
- """See stream.Consumer.terminate for specification."""
- self.calls.append((None, True))
+ def terminate(self):
+ """See stream.Consumer.terminate for specification."""
+ self.calls.append((None, True))
- def consume_and_terminate(self, value):
- """See stream.Consumer.consume_and_terminate for specification."""
- self.calls.append((value, True))
+ def consume_and_terminate(self, value):
+ """See stream.Consumer.consume_and_terminate for specification."""
+ self.calls.append((value, True))
- def is_legal(self):
- """Reports whether or not a legal sequence of calls has been made."""
- terminated = False
- for value, terminal in self.calls:
- if terminated:
- return False
- elif terminal:
- terminated = True
- elif value is None:
- return False
- else: # pylint: disable=useless-else-on-loop
- return True
+ def is_legal(self):
+ """Reports whether or not a legal sequence of calls has been made."""
+ terminated = False
+ for value, terminal in self.calls:
+ if terminated:
+ return False
+ elif terminal:
+ terminated = True
+ elif value is None:
+ return False
+ else: # pylint: disable=useless-else-on-loop
+ return True
- def values(self):
- """Returns the sequence of values that have been passed to this Consumer."""
- return [value for value, _ in self.calls if value]
+ def values(self):
+ """Returns the sequence of values that have been passed to this Consumer."""
+ return [value for value, _ in self.calls if value]
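For reference, a small sketch (assuming the grpcio_tests package is importable as tests) of how the reformatted TestConsumer records calls: values() keeps only non-None values, and is_legal() rejects any call made after termination:

    from tests.unit.framework.foundation import stream_testing

    consumer = stream_testing.TestConsumer()
    consumer.consume('a')
    consumer.consume_and_terminate('b')
    assert consumer.values() == ['a', 'b']
    assert consumer.is_legal()

    # Any call made after termination makes the recorded sequence illegal.
    consumer.consume('c')
    assert not consumer.is_legal()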
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/__init__.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/__init__.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_3069_test_constant.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_3069_test_constant.py
index 1ea356c0bf..2aec25c9ef 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_3069_test_constant.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_3069_test_constant.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""A test constant working around issue 3069."""
# test_constants is referenced from specification in this module.
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/__init__.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/__init__.py
index 7086519106..b89398809f 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/__init__.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/__init__.py
@@ -26,5 +26,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py
index e338aaa396..a79834f96f 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Test code for the Face layer of RPC Framework."""
from __future__ import division
@@ -50,246 +49,254 @@ from tests.unit.framework.interfaces.face import _stock_service
from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
-class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest.TestCase)):
- """A test of the Face layer of RPC Framework.
+class TestCase(
+ six.with_metaclass(abc.ABCMeta, test_coverage.Coverage,
+ unittest.TestCase)):
+ """A test of the Face layer of RPC Framework.
Concrete subclasses must have an "implementation" attribute of type
test_interfaces.Implementation and an "invoker_constructor" attribute of type
_invocation.InvokerConstructor.
"""
- NAME = 'BlockingInvocationInlineServiceTest'
+ NAME = 'BlockingInvocationInlineServiceTest'
- def setUp(self):
- """See unittest.TestCase.setUp for full specification.
+ def setUp(self):
+ """See unittest.TestCase.setUp for full specification.
Overriding implementations must call this implementation.
"""
- self._control = test_control.PauseFailControl()
- self._digest = _digest.digest(
- _stock_service.STOCK_TEST_SERVICE, self._control, None)
+ self._control = test_control.PauseFailControl()
+ self._digest = _digest.digest(_stock_service.STOCK_TEST_SERVICE,
+ self._control, None)
- generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
- self._digest.methods, self._digest.inline_method_implementations, None)
- self._invoker = self.invoker_constructor.construct_invoker(
- generic_stub, dynamic_stubs, self._digest.methods)
+ generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
+ self._digest.methods, self._digest.inline_method_implementations,
+ None)
+ self._invoker = self.invoker_constructor.construct_invoker(
+ generic_stub, dynamic_stubs, self._digest.methods)
- def tearDown(self):
- """See unittest.TestCase.tearDown for full specification.
+ def tearDown(self):
+ """See unittest.TestCase.tearDown for full specification.
Overriding implementations must call this implementation.
"""
- self._invoker = None
- self.implementation.destantiate(self._memo)
-
- def testSuccessfulUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- response, call = self._invoker.blocking(group, method)(
- request, test_constants.LONG_TIMEOUT, with_call=True)
-
- test_messages.verify(request, response, self)
-
- def testSuccessfulUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- response_iterator = self._invoker.blocking(group, method)(
- request, test_constants.LONG_TIMEOUT)
- responses = list(response_iterator)
-
- test_messages.verify(request, responses, self)
-
- def testSuccessfulStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- response, call = self._invoker.blocking(group, method)(
- iter(requests), test_constants.LONG_TIMEOUT, with_call=True)
-
- test_messages.verify(requests, response, self)
-
- def testSuccessfulStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- response_iterator = self._invoker.blocking(group, method)(
- iter(requests), test_constants.LONG_TIMEOUT)
- responses = list(response_iterator)
-
- test_messages.verify(requests, responses, self)
-
- def testSequentialInvocations(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- first_request = test_messages.request()
- second_request = test_messages.request()
-
- first_response = self._invoker.blocking(group, method)(
- first_request, test_constants.LONG_TIMEOUT)
-
- test_messages.verify(first_request, first_response, self)
-
- second_response = self._invoker.blocking(group, method)(
- second_request, test_constants.LONG_TIMEOUT)
-
- test_messages.verify(second_request, second_response, self)
-
- def testParallelInvocations(self):
- pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = []
- response_futures = []
- for _ in range(test_constants.THREAD_CONCURRENCY):
- request = test_messages.request()
- response_future = pool.submit(
- self._invoker.blocking(group, method), request,
- test_constants.LONG_TIMEOUT)
- requests.append(request)
- response_futures.append(response_future)
-
- responses = [
- response_future.result() for response_future in response_futures]
-
- for request, response in zip(requests, responses):
- test_messages.verify(request, response, self)
- pool.shutdown(wait=True)
-
- def testWaitingForSomeButNotAllParallelInvocations(self):
- pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = []
- response_futures_to_indices = {}
- for index in range(test_constants.THREAD_CONCURRENCY):
- request = test_messages.request()
- response_future = pool.submit(
- self._invoker.blocking(group, method), request,
- test_constants.LONG_TIMEOUT)
- requests.append(request)
- response_futures_to_indices[response_future] = index
-
- some_completed_response_futures_iterator = itertools.islice(
- futures.as_completed(response_futures_to_indices),
- test_constants.THREAD_CONCURRENCY // 2)
- for response_future in some_completed_response_futures_iterator:
- index = response_futures_to_indices[response_future]
- test_messages.verify(requests[index], response_future.result(), self)
- pool.shutdown(wait=True)
-
- @unittest.skip('Cancellation impossible with blocking control flow!')
- def testCancelledUnaryRequestUnaryResponse(self):
- raise NotImplementedError()
-
- @unittest.skip('Cancellation impossible with blocking control flow!')
- def testCancelledUnaryRequestStreamResponse(self):
- raise NotImplementedError()
-
- @unittest.skip('Cancellation impossible with blocking control flow!')
- def testCancelledStreamRequestUnaryResponse(self):
- raise NotImplementedError()
-
- @unittest.skip('Cancellation impossible with blocking control flow!')
- def testCancelledStreamRequestStreamResponse(self):
- raise NotImplementedError()
-
- def testExpiredUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- with self._control.pause(), self.assertRaises(
- face.ExpirationError):
- self._invoker.blocking(group, method)(
- request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
-
- def testExpiredUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- with self._control.pause(), self.assertRaises(
- face.ExpirationError):
- response_iterator = self._invoker.blocking(group, method)(
- request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
- list(response_iterator)
-
- def testExpiredStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- with self._control.pause(), self.assertRaises(
- face.ExpirationError):
- self._invoker.blocking(group, method)(
- iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
-
- def testExpiredStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- with self._control.pause(), self.assertRaises(
- face.ExpirationError):
- response_iterator = self._invoker.blocking(group, method)(
- iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
- list(response_iterator)
-
- def testFailedUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- with self._control.fail(), self.assertRaises(face.RemoteError):
- self._invoker.blocking(group, method)(
- request, test_constants.LONG_TIMEOUT)
-
- def testFailedUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- with self._control.fail(), self.assertRaises(face.RemoteError):
- response_iterator = self._invoker.blocking(group, method)(
- request, test_constants.LONG_TIMEOUT)
- list(response_iterator)
-
- def testFailedStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- with self._control.fail(), self.assertRaises(face.RemoteError):
- self._invoker.blocking(group, method)(
- iter(requests), test_constants.LONG_TIMEOUT)
-
- def testFailedStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- with self._control.fail(), self.assertRaises(face.RemoteError):
- response_iterator = self._invoker.blocking(group, method)(
- iter(requests), test_constants.LONG_TIMEOUT)
- list(response_iterator)
+ self._invoker = None
+ self.implementation.destantiate(self._memo)
+
+ def testSuccessfulUnaryRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ response, call = self._invoker.blocking(group, method)(
+ request, test_constants.LONG_TIMEOUT, with_call=True)
+
+ test_messages.verify(request, response, self)
+
+ def testSuccessfulUnaryRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ response_iterator = self._invoker.blocking(group, method)(
+ request, test_constants.LONG_TIMEOUT)
+ responses = list(response_iterator)
+
+ test_messages.verify(request, responses, self)
+
+ def testSuccessfulStreamRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ response, call = self._invoker.blocking(group, method)(
+ iter(requests), test_constants.LONG_TIMEOUT, with_call=True)
+
+ test_messages.verify(requests, response, self)
+
+ def testSuccessfulStreamRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ response_iterator = self._invoker.blocking(group, method)(
+ iter(requests), test_constants.LONG_TIMEOUT)
+ responses = list(response_iterator)
+
+ test_messages.verify(requests, responses, self)
+
+ def testSequentialInvocations(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ first_request = test_messages.request()
+ second_request = test_messages.request()
+
+ first_response = self._invoker.blocking(group, method)(
+ first_request, test_constants.LONG_TIMEOUT)
+
+ test_messages.verify(first_request, first_response, self)
+
+ second_response = self._invoker.blocking(group, method)(
+ second_request, test_constants.LONG_TIMEOUT)
+
+ test_messages.verify(second_request, second_response, self)
+
+ def testParallelInvocations(self):
+ pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = []
+ response_futures = []
+ for _ in range(test_constants.THREAD_CONCURRENCY):
+ request = test_messages.request()
+ response_future = pool.submit(
+ self._invoker.blocking(group, method), request,
+ test_constants.LONG_TIMEOUT)
+ requests.append(request)
+ response_futures.append(response_future)
+
+ responses = [
+ response_future.result()
+ for response_future in response_futures
+ ]
+
+ for request, response in zip(requests, responses):
+ test_messages.verify(request, response, self)
+ pool.shutdown(wait=True)
+
+ def testWaitingForSomeButNotAllParallelInvocations(self):
+ pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = []
+ response_futures_to_indices = {}
+ for index in range(test_constants.THREAD_CONCURRENCY):
+ request = test_messages.request()
+ response_future = pool.submit(
+ self._invoker.blocking(group, method), request,
+ test_constants.LONG_TIMEOUT)
+ requests.append(request)
+ response_futures_to_indices[response_future] = index
+
+ some_completed_response_futures_iterator = itertools.islice(
+ futures.as_completed(response_futures_to_indices),
+ test_constants.THREAD_CONCURRENCY // 2)
+ for response_future in some_completed_response_futures_iterator:
+ index = response_futures_to_indices[response_future]
+ test_messages.verify(requests[index],
+ response_future.result(), self)
+ pool.shutdown(wait=True)
+
+ @unittest.skip('Cancellation impossible with blocking control flow!')
+ def testCancelledUnaryRequestUnaryResponse(self):
+ raise NotImplementedError()
+
+ @unittest.skip('Cancellation impossible with blocking control flow!')
+ def testCancelledUnaryRequestStreamResponse(self):
+ raise NotImplementedError()
+
+ @unittest.skip('Cancellation impossible with blocking control flow!')
+ def testCancelledStreamRequestUnaryResponse(self):
+ raise NotImplementedError()
+
+ @unittest.skip('Cancellation impossible with blocking control flow!')
+ def testCancelledStreamRequestStreamResponse(self):
+ raise NotImplementedError()
+
+ def testExpiredUnaryRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self._control.pause(), self.assertRaises(
+ face.ExpirationError):
+ self._invoker.blocking(group, method)(
+ request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
+
+ def testExpiredUnaryRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self._control.pause(), self.assertRaises(
+ face.ExpirationError):
+ response_iterator = self._invoker.blocking(group, method)(
+ request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
+ list(response_iterator)
+
+ def testExpiredStreamRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self._control.pause(), self.assertRaises(
+ face.ExpirationError):
+ self._invoker.blocking(group, method)(
+ iter(requests),
+ _3069_test_constant.REALLY_SHORT_TIMEOUT)
+
+ def testExpiredStreamRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self._control.pause(), self.assertRaises(
+ face.ExpirationError):
+ response_iterator = self._invoker.blocking(group, method)(
+ iter(requests),
+ _3069_test_constant.REALLY_SHORT_TIMEOUT)
+ list(response_iterator)
+
+ def testFailedUnaryRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self._control.fail(), self.assertRaises(face.RemoteError):
+ self._invoker.blocking(group, method)(
+ request, test_constants.LONG_TIMEOUT)
+
+ def testFailedUnaryRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self._control.fail(), self.assertRaises(face.RemoteError):
+ response_iterator = self._invoker.blocking(group, method)(
+ request, test_constants.LONG_TIMEOUT)
+ list(response_iterator)
+
+ def testFailedStreamRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self._control.fail(), self.assertRaises(face.RemoteError):
+ self._invoker.blocking(group, method)(
+ iter(requests), test_constants.LONG_TIMEOUT)
+
+ def testFailedStreamRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self._control.fail(), self.assertRaises(face.RemoteError):
+ response_iterator = self._invoker.blocking(group, method)(
+ iter(requests), test_constants.LONG_TIMEOUT)
+ list(response_iterator)
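Every test method above follows the same blocking calling convention: invoker.blocking(group, method) yields a callable that is then applied to a request (or an iterator of requests) and a timeout, with with_call=True additionally returning the call object. An illustrative sketch, where invoker, group, method, request and requests stand in for the values each loop binds:

    unary_unary = invoker.blocking(group, method)
    response, call = unary_unary(
        request, test_constants.LONG_TIMEOUT, with_call=True)

    stream_stream = invoker.blocking(group, method)
    responses = list(
        stream_stream(iter(requests), test_constants.LONG_TIMEOUT))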
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py
index f0befb0b27..0411da0a66 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Code for making a service.TestService more amenable to use in tests."""
import collections
@@ -49,17 +48,16 @@ _IDENTITY = lambda x: x
class TestServiceDigest(
- collections.namedtuple(
- 'TestServiceDigest',
- ('methods',
- 'inline_method_implementations',
- 'event_method_implementations',
- 'multi_method_implementation',
- 'unary_unary_messages_sequences',
- 'unary_stream_messages_sequences',
- 'stream_unary_messages_sequences',
- 'stream_stream_messages_sequences',))):
- """A transformation of a service.TestService.
+ collections.namedtuple('TestServiceDigest', (
+ 'methods',
+ 'inline_method_implementations',
+ 'event_method_implementations',
+ 'multi_method_implementation',
+ 'unary_unary_messages_sequences',
+ 'unary_stream_messages_sequences',
+ 'stream_unary_messages_sequences',
+ 'stream_stream_messages_sequences',))):
+ """A transformation of a service.TestService.
Attributes:
methods: A dict from method group-name pair to test_interfaces.Method object
@@ -88,303 +86,308 @@ class TestServiceDigest(
class _BufferingConsumer(stream.Consumer):
- """A trivial Consumer that dumps what it consumes in a user-mutable buffer."""
+ """A trivial Consumer that dumps what it consumes in a user-mutable buffer."""
- def __init__(self):
- self.consumed = []
- self.terminated = False
+ def __init__(self):
+ self.consumed = []
+ self.terminated = False
- def consume(self, value):
- self.consumed.append(value)
+ def consume(self, value):
+ self.consumed.append(value)
- def terminate(self):
- self.terminated = True
+ def terminate(self):
+ self.terminated = True
- def consume_and_terminate(self, value):
- self.consumed.append(value)
- self.terminated = True
+ def consume_and_terminate(self, value):
+ self.consumed.append(value)
+ self.terminated = True
class _InlineUnaryUnaryMethod(face.MethodImplementation):
- def __init__(self, unary_unary_test_method, control):
- self._test_method = unary_unary_test_method
- self._control = control
+ def __init__(self, unary_unary_test_method, control):
+ self._test_method = unary_unary_test_method
+ self._control = control
- self.cardinality = cardinality.Cardinality.UNARY_UNARY
- self.style = style.Service.INLINE
+ self.cardinality = cardinality.Cardinality.UNARY_UNARY
+ self.style = style.Service.INLINE
- def unary_unary_inline(self, request, context):
- response_list = []
- self._test_method.service(
- request, response_list.append, context, self._control)
- return response_list.pop(0)
+ def unary_unary_inline(self, request, context):
+ response_list = []
+ self._test_method.service(request, response_list.append, context,
+ self._control)
+ return response_list.pop(0)
class _EventUnaryUnaryMethod(face.MethodImplementation):
- def __init__(self, unary_unary_test_method, control, pool):
- self._test_method = unary_unary_test_method
- self._control = control
- self._pool = pool
+ def __init__(self, unary_unary_test_method, control, pool):
+ self._test_method = unary_unary_test_method
+ self._control = control
+ self._pool = pool
- self.cardinality = cardinality.Cardinality.UNARY_UNARY
- self.style = style.Service.EVENT
+ self.cardinality = cardinality.Cardinality.UNARY_UNARY
+ self.style = style.Service.EVENT
- def unary_unary_event(self, request, response_callback, context):
- if self._pool is None:
- self._test_method.service(
- request, response_callback, context, self._control)
- else:
- self._pool.submit(
- self._test_method.service, request, response_callback, context,
- self._control)
+ def unary_unary_event(self, request, response_callback, context):
+ if self._pool is None:
+ self._test_method.service(request, response_callback, context,
+ self._control)
+ else:
+ self._pool.submit(self._test_method.service, request,
+ response_callback, context, self._control)
class _InlineUnaryStreamMethod(face.MethodImplementation):
- def __init__(self, unary_stream_test_method, control):
- self._test_method = unary_stream_test_method
- self._control = control
+ def __init__(self, unary_stream_test_method, control):
+ self._test_method = unary_stream_test_method
+ self._control = control
- self.cardinality = cardinality.Cardinality.UNARY_STREAM
- self.style = style.Service.INLINE
+ self.cardinality = cardinality.Cardinality.UNARY_STREAM
+ self.style = style.Service.INLINE
- def unary_stream_inline(self, request, context):
- response_consumer = _BufferingConsumer()
- self._test_method.service(
- request, response_consumer, context, self._control)
- for response in response_consumer.consumed:
- yield response
+ def unary_stream_inline(self, request, context):
+ response_consumer = _BufferingConsumer()
+ self._test_method.service(request, response_consumer, context,
+ self._control)
+ for response in response_consumer.consumed:
+ yield response
class _EventUnaryStreamMethod(face.MethodImplementation):
- def __init__(self, unary_stream_test_method, control, pool):
- self._test_method = unary_stream_test_method
- self._control = control
- self._pool = pool
+ def __init__(self, unary_stream_test_method, control, pool):
+ self._test_method = unary_stream_test_method
+ self._control = control
+ self._pool = pool
- self.cardinality = cardinality.Cardinality.UNARY_STREAM
- self.style = style.Service.EVENT
+ self.cardinality = cardinality.Cardinality.UNARY_STREAM
+ self.style = style.Service.EVENT
- def unary_stream_event(self, request, response_consumer, context):
- if self._pool is None:
- self._test_method.service(
- request, response_consumer, context, self._control)
- else:
- self._pool.submit(
- self._test_method.service, request, response_consumer, context,
- self._control)
+ def unary_stream_event(self, request, response_consumer, context):
+ if self._pool is None:
+ self._test_method.service(request, response_consumer, context,
+ self._control)
+ else:
+ self._pool.submit(self._test_method.service, request,
+ response_consumer, context, self._control)
class _InlineStreamUnaryMethod(face.MethodImplementation):
- def __init__(self, stream_unary_test_method, control):
- self._test_method = stream_unary_test_method
- self._control = control
+ def __init__(self, stream_unary_test_method, control):
+ self._test_method = stream_unary_test_method
+ self._control = control
- self.cardinality = cardinality.Cardinality.STREAM_UNARY
- self.style = style.Service.INLINE
+ self.cardinality = cardinality.Cardinality.STREAM_UNARY
+ self.style = style.Service.INLINE
- def stream_unary_inline(self, request_iterator, context):
- response_list = []
- request_consumer = self._test_method.service(
- response_list.append, context, self._control)
- for request in request_iterator:
- request_consumer.consume(request)
- request_consumer.terminate()
- return response_list.pop(0)
+ def stream_unary_inline(self, request_iterator, context):
+ response_list = []
+ request_consumer = self._test_method.service(response_list.append,
+ context, self._control)
+ for request in request_iterator:
+ request_consumer.consume(request)
+ request_consumer.terminate()
+ return response_list.pop(0)
class _EventStreamUnaryMethod(face.MethodImplementation):
- def __init__(self, stream_unary_test_method, control, pool):
- self._test_method = stream_unary_test_method
- self._control = control
- self._pool = pool
+ def __init__(self, stream_unary_test_method, control, pool):
+ self._test_method = stream_unary_test_method
+ self._control = control
+ self._pool = pool
- self.cardinality = cardinality.Cardinality.STREAM_UNARY
- self.style = style.Service.EVENT
+ self.cardinality = cardinality.Cardinality.STREAM_UNARY
+ self.style = style.Service.EVENT
- def stream_unary_event(self, response_callback, context):
- request_consumer = self._test_method.service(
- response_callback, context, self._control)
- if self._pool is None:
- return request_consumer
- else:
- return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
+ def stream_unary_event(self, response_callback, context):
+ request_consumer = self._test_method.service(response_callback, context,
+ self._control)
+ if self._pool is None:
+ return request_consumer
+ else:
+ return stream_util.ThreadSwitchingConsumer(request_consumer,
+ self._pool)
class _InlineStreamStreamMethod(face.MethodImplementation):
- def __init__(self, stream_stream_test_method, control):
- self._test_method = stream_stream_test_method
- self._control = control
+ def __init__(self, stream_stream_test_method, control):
+ self._test_method = stream_stream_test_method
+ self._control = control
- self.cardinality = cardinality.Cardinality.STREAM_STREAM
- self.style = style.Service.INLINE
+ self.cardinality = cardinality.Cardinality.STREAM_STREAM
+ self.style = style.Service.INLINE
- def stream_stream_inline(self, request_iterator, context):
- response_consumer = _BufferingConsumer()
- request_consumer = self._test_method.service(
- response_consumer, context, self._control)
+ def stream_stream_inline(self, request_iterator, context):
+ response_consumer = _BufferingConsumer()
+ request_consumer = self._test_method.service(response_consumer, context,
+ self._control)
- for request in request_iterator:
- request_consumer.consume(request)
- while response_consumer.consumed:
- yield response_consumer.consumed.pop(0)
- response_consumer.terminate()
+ for request in request_iterator:
+ request_consumer.consume(request)
+ while response_consumer.consumed:
+ yield response_consumer.consumed.pop(0)
+ response_consumer.terminate()
class _EventStreamStreamMethod(face.MethodImplementation):
- def __init__(self, stream_stream_test_method, control, pool):
- self._test_method = stream_stream_test_method
- self._control = control
- self._pool = pool
+ def __init__(self, stream_stream_test_method, control, pool):
+ self._test_method = stream_stream_test_method
+ self._control = control
+ self._pool = pool
- self.cardinality = cardinality.Cardinality.STREAM_STREAM
- self.style = style.Service.EVENT
+ self.cardinality = cardinality.Cardinality.STREAM_STREAM
+ self.style = style.Service.EVENT
- def stream_stream_event(self, response_consumer, context):
- request_consumer = self._test_method.service(
- response_consumer, context, self._control)
- if self._pool is None:
- return request_consumer
- else:
- return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
+ def stream_stream_event(self, response_consumer, context):
+ request_consumer = self._test_method.service(response_consumer, context,
+ self._control)
+ if self._pool is None:
+ return request_consumer
+ else:
+ return stream_util.ThreadSwitchingConsumer(request_consumer,
+ self._pool)
class _UnaryConsumer(stream.Consumer):
- """A Consumer that only allows consumption of exactly one value."""
-
- def __init__(self, action):
- self._lock = threading.Lock()
- self._action = action
- self._consumed = False
- self._terminated = False
-
- def consume(self, value):
- with self._lock:
- if self._consumed:
- raise ValueError('Unary consumer already consumed!')
- elif self._terminated:
- raise ValueError('Unary consumer already terminated!')
- else:
- self._consumed = True
-
- self._action(value)
-
- def terminate(self):
- with self._lock:
- if not self._consumed:
- raise ValueError('Unary consumer hasn\'t yet consumed!')
- elif self._terminated:
- raise ValueError('Unary consumer already terminated!')
- else:
- self._terminated = True
-
- def consume_and_terminate(self, value):
- with self._lock:
- if self._consumed:
- raise ValueError('Unary consumer already consumed!')
- elif self._terminated:
- raise ValueError('Unary consumer already terminated!')
- else:
- self._consumed = True
- self._terminated = True
-
- self._action(value)
+ """A Consumer that only allows consumption of exactly one value."""
+
+ def __init__(self, action):
+ self._lock = threading.Lock()
+ self._action = action
+ self._consumed = False
+ self._terminated = False
+
+ def consume(self, value):
+ with self._lock:
+ if self._consumed:
+ raise ValueError('Unary consumer already consumed!')
+ elif self._terminated:
+ raise ValueError('Unary consumer already terminated!')
+ else:
+ self._consumed = True
+
+ self._action(value)
+
+ def terminate(self):
+ with self._lock:
+ if not self._consumed:
+ raise ValueError('Unary consumer hasn\'t yet consumed!')
+ elif self._terminated:
+ raise ValueError('Unary consumer already terminated!')
+ else:
+ self._terminated = True
+
+ def consume_and_terminate(self, value):
+ with self._lock:
+ if self._consumed:
+ raise ValueError('Unary consumer already consumed!')
+ elif self._terminated:
+ raise ValueError('Unary consumer already terminated!')
+ else:
+ self._consumed = True
+ self._terminated = True
+
+ self._action(value)
class _UnaryUnaryAdaptation(object):
- def __init__(self, unary_unary_test_method):
- self._method = unary_unary_test_method
+ def __init__(self, unary_unary_test_method):
+ self._method = unary_unary_test_method
+
+ def service(self, response_consumer, context, control):
+
+ def action(request):
+ self._method.service(request,
+ response_consumer.consume_and_terminate,
+ context, control)
- def service(self, response_consumer, context, control):
- def action(request):
- self._method.service(
- request, response_consumer.consume_and_terminate, context, control)
- return _UnaryConsumer(action)
+ return _UnaryConsumer(action)
class _UnaryStreamAdaptation(object):
- def __init__(self, unary_stream_test_method):
- self._method = unary_stream_test_method
+ def __init__(self, unary_stream_test_method):
+ self._method = unary_stream_test_method
+
+ def service(self, response_consumer, context, control):
+
+ def action(request):
+ self._method.service(request, response_consumer, context, control)
- def service(self, response_consumer, context, control):
- def action(request):
- self._method.service(request, response_consumer, context, control)
- return _UnaryConsumer(action)
+ return _UnaryConsumer(action)
class _StreamUnaryAdaptation(object):
- def __init__(self, stream_unary_test_method):
- self._method = stream_unary_test_method
+ def __init__(self, stream_unary_test_method):
+ self._method = stream_unary_test_method
- def service(self, response_consumer, context, control):
- return self._method.service(
- response_consumer.consume_and_terminate, context, control)
+ def service(self, response_consumer, context, control):
+ return self._method.service(response_consumer.consume_and_terminate,
+ context, control)
class _MultiMethodImplementation(face.MultiMethodImplementation):
- def __init__(self, methods, control, pool):
- self._methods = methods
- self._control = control
- self._pool = pool
+ def __init__(self, methods, control, pool):
+ self._methods = methods
+ self._control = control
+ self._pool = pool
- def service(self, group, name, response_consumer, context):
- method = self._methods.get(group, name, None)
- if method is None:
- raise face.NoSuchMethodError(group, name)
- elif self._pool is None:
- return method(response_consumer, context, self._control)
- else:
- request_consumer = method(response_consumer, context, self._control)
- return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
+ def service(self, group, name, response_consumer, context):
+ method = self._methods.get(group, name, None)
+ if method is None:
+ raise face.NoSuchMethodError(group, name)
+ elif self._pool is None:
+ return method(response_consumer, context, self._control)
+ else:
+ request_consumer = method(response_consumer, context, self._control)
+ return stream_util.ThreadSwitchingConsumer(request_consumer,
+ self._pool)
class _Assembly(
- collections.namedtuple(
- '_Assembly',
- ['methods', 'inlines', 'events', 'adaptations', 'messages'])):
- """An intermediate structure created when creating a TestServiceDigest."""
-
-
-def _assemble(
- scenarios, identifiers, inline_method_constructor, event_method_constructor,
- adapter, control, pool):
- """Creates an _Assembly from the given scenarios."""
- methods = {}
- inlines = {}
- events = {}
- adaptations = {}
- messages = {}
- for identifier, scenario in six.iteritems(scenarios):
- if identifier in identifiers:
- raise ValueError('Repeated identifier "(%s, %s)"!' % identifier)
-
- test_method = scenario[0]
- inline_method = inline_method_constructor(test_method, control)
- event_method = event_method_constructor(test_method, control, pool)
- adaptation = adapter(test_method)
-
- methods[identifier] = test_method
- inlines[identifier] = inline_method
- events[identifier] = event_method
- adaptations[identifier] = adaptation
- messages[identifier] = scenario[1]
-
- return _Assembly(methods, inlines, events, adaptations, messages)
+ collections.namedtuple(
+ '_Assembly',
+ ['methods', 'inlines', 'events', 'adaptations', 'messages'])):
+ """An intermediate structure created when creating a TestServiceDigest."""
+
+
+def _assemble(scenarios, identifiers, inline_method_constructor,
+ event_method_constructor, adapter, control, pool):
+ """Creates an _Assembly from the given scenarios."""
+ methods = {}
+ inlines = {}
+ events = {}
+ adaptations = {}
+ messages = {}
+ for identifier, scenario in six.iteritems(scenarios):
+ if identifier in identifiers:
+ raise ValueError('Repeated identifier "(%s, %s)"!' % identifier)
+
+ test_method = scenario[0]
+ inline_method = inline_method_constructor(test_method, control)
+ event_method = event_method_constructor(test_method, control, pool)
+ adaptation = adapter(test_method)
+
+ methods[identifier] = test_method
+ inlines[identifier] = inline_method
+ events[identifier] = event_method
+ adaptations[identifier] = adaptation
+ messages[identifier] = scenario[1]
+
+ return _Assembly(methods, inlines, events, adaptations, messages)
def digest(service, control, pool):
- """Creates a TestServiceDigest from a TestService.
+ """Creates a TestServiceDigest from a TestService.
Args:
service: A _service.TestService.
@@ -396,51 +399,48 @@ def digest(service, control, pool):
Returns:
A TestServiceDigest synthesized from the given service.TestService.
"""
- identifiers = set()
-
- unary_unary = _assemble(
- service.unary_unary_scenarios(), identifiers, _InlineUnaryUnaryMethod,
- _EventUnaryUnaryMethod, _UnaryUnaryAdaptation, control, pool)
- identifiers.update(unary_unary.inlines)
-
- unary_stream = _assemble(
- service.unary_stream_scenarios(), identifiers, _InlineUnaryStreamMethod,
- _EventUnaryStreamMethod, _UnaryStreamAdaptation, control, pool)
- identifiers.update(unary_stream.inlines)
-
- stream_unary = _assemble(
- service.stream_unary_scenarios(), identifiers, _InlineStreamUnaryMethod,
- _EventStreamUnaryMethod, _StreamUnaryAdaptation, control, pool)
- identifiers.update(stream_unary.inlines)
-
- stream_stream = _assemble(
- service.stream_stream_scenarios(), identifiers, _InlineStreamStreamMethod,
- _EventStreamStreamMethod, _IDENTITY, control, pool)
- identifiers.update(stream_stream.inlines)
-
- methods = dict(unary_unary.methods)
- methods.update(unary_stream.methods)
- methods.update(stream_unary.methods)
- methods.update(stream_stream.methods)
- adaptations = dict(unary_unary.adaptations)
- adaptations.update(unary_stream.adaptations)
- adaptations.update(stream_unary.adaptations)
- adaptations.update(stream_stream.adaptations)
- inlines = dict(unary_unary.inlines)
- inlines.update(unary_stream.inlines)
- inlines.update(stream_unary.inlines)
- inlines.update(stream_stream.inlines)
- events = dict(unary_unary.events)
- events.update(unary_stream.events)
- events.update(stream_unary.events)
- events.update(stream_stream.events)
-
- return TestServiceDigest(
- methods,
- inlines,
- events,
- _MultiMethodImplementation(adaptations, control, pool),
- unary_unary.messages,
- unary_stream.messages,
- stream_unary.messages,
- stream_stream.messages)
+ identifiers = set()
+
+ unary_unary = _assemble(service.unary_unary_scenarios(), identifiers,
+ _InlineUnaryUnaryMethod, _EventUnaryUnaryMethod,
+ _UnaryUnaryAdaptation, control, pool)
+ identifiers.update(unary_unary.inlines)
+
+ unary_stream = _assemble(service.unary_stream_scenarios(), identifiers,
+ _InlineUnaryStreamMethod, _EventUnaryStreamMethod,
+ _UnaryStreamAdaptation, control, pool)
+ identifiers.update(unary_stream.inlines)
+
+ stream_unary = _assemble(service.stream_unary_scenarios(), identifiers,
+ _InlineStreamUnaryMethod, _EventStreamUnaryMethod,
+ _StreamUnaryAdaptation, control, pool)
+ identifiers.update(stream_unary.inlines)
+
+ stream_stream = _assemble(service.stream_stream_scenarios(), identifiers,
+ _InlineStreamStreamMethod,
+ _EventStreamStreamMethod, _IDENTITY, control,
+ pool)
+ identifiers.update(stream_stream.inlines)
+
+ methods = dict(unary_unary.methods)
+ methods.update(unary_stream.methods)
+ methods.update(stream_unary.methods)
+ methods.update(stream_stream.methods)
+ adaptations = dict(unary_unary.adaptations)
+ adaptations.update(unary_stream.adaptations)
+ adaptations.update(stream_unary.adaptations)
+ adaptations.update(stream_stream.adaptations)
+ inlines = dict(unary_unary.inlines)
+ inlines.update(unary_stream.inlines)
+ inlines.update(stream_unary.inlines)
+ inlines.update(stream_stream.inlines)
+ events = dict(unary_unary.events)
+ events.update(unary_stream.events)
+ events.update(stream_unary.events)
+ events.update(stream_stream.events)
+
+ return TestServiceDigest(
+ methods, inlines, events,
+ _MultiMethodImplementation(adaptations, control, pool),
+ unary_unary.messages, unary_stream.messages, stream_unary.messages,
+ stream_stream.messages)
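The reworked digest() above is the single entry point the TestCase classes call from setUp: it runs _assemble once per cardinality, merges the resulting maps, and wraps the adaptations in a _MultiMethodImplementation. A minimal sketch of the call site (module paths for test_constants and test_control are assumed from the surrounding test modules; pool may be None for inline servicing, as in the blocking tests):

    from grpc.framework.foundation import logging_pool
    from tests.unit.framework.common import test_constants
    from tests.unit.framework.common import test_control
    from tests.unit.framework.interfaces.face import _digest
    from tests.unit.framework.interfaces.face import _stock_service

    control = test_control.PauseFailControl()
    pool = logging_pool.pool(test_constants.POOL_SIZE)

    service_digest = _digest.digest(_stock_service.STOCK_TEST_SERVICE, control, pool)

    # The digest's fields are the per-cardinality maps the tests iterate over.
    unary_unary_sequences = service_digest.unary_unary_messages_sequences
    multi_method = service_digest.multi_method_implementation

    pool.shutdown(wait=True)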
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
index df620b19ba..703eef3a82 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Test code for the Face layer of RPC Framework."""
from __future__ import division
@@ -55,457 +54,470 @@ from tests.unit.framework.interfaces.face import test_interfaces # pylint: disa
class _PauseableIterator(object):
- def __init__(self, upstream):
- self._upstream = upstream
- self._condition = threading.Condition()
- self._paused = False
+ def __init__(self, upstream):
+ self._upstream = upstream
+ self._condition = threading.Condition()
+ self._paused = False
- @contextlib.contextmanager
- def pause(self):
- with self._condition:
- self._paused = True
- yield
- with self._condition:
- self._paused = False
- self._condition.notify_all()
+ @contextlib.contextmanager
+ def pause(self):
+ with self._condition:
+ self._paused = True
+ yield
+ with self._condition:
+ self._paused = False
+ self._condition.notify_all()
- def __iter__(self):
- return self
+ def __iter__(self):
+ return self
- def __next__(self):
- return self.next()
+ def __next__(self):
+ return self.next()
- def next(self):
- with self._condition:
- while self._paused:
- self._condition.wait()
- return next(self._upstream)
+ def next(self):
+ with self._condition:
+ while self._paused:
+ self._condition.wait()
+ return next(self._upstream)
class _Callback(object):
- def __init__(self):
- self._condition = threading.Condition()
- self._called = False
- self._passed_future = None
- self._passed_other_stuff = None
-
- def __call__(self, *args, **kwargs):
- with self._condition:
- self._called = True
- if args:
- self._passed_future = args[0]
- if 1 < len(args) or kwargs:
- self._passed_other_stuff = tuple(args[1:]), dict(kwargs)
- self._condition.notify_all()
-
- def future(self):
- with self._condition:
- while True:
- if self._passed_other_stuff is not None:
- raise ValueError(
- 'Test callback passed unexpected values: %s',
- self._passed_other_stuff)
- elif self._called:
- return self._passed_future
- else:
- self._condition.wait()
-
-
-class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest.TestCase)):
- """A test of the Face layer of RPC Framework.
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._called = False
+ self._passed_future = None
+ self._passed_other_stuff = None
+
+ def __call__(self, *args, **kwargs):
+ with self._condition:
+ self._called = True
+ if args:
+ self._passed_future = args[0]
+ if 1 < len(args) or kwargs:
+ self._passed_other_stuff = tuple(args[1:]), dict(kwargs)
+ self._condition.notify_all()
+
+ def future(self):
+ with self._condition:
+ while True:
+ if self._passed_other_stuff is not None:
+ raise ValueError(
+ 'Test callback passed unexpected values: %s',
+ self._passed_other_stuff)
+ elif self._called:
+ return self._passed_future
+ else:
+ self._condition.wait()
+
+
+class TestCase(
+ six.with_metaclass(abc.ABCMeta, test_coverage.Coverage,
+ unittest.TestCase)):
+ """A test of the Face layer of RPC Framework.
Concrete subclasses must have an "implementation" attribute of type
test_interfaces.Implementation and an "invoker_constructor" attribute of type
_invocation.InvokerConstructor.
"""
- NAME = 'FutureInvocationAsynchronousEventServiceTest'
+ NAME = 'FutureInvocationAsynchronousEventServiceTest'
- def setUp(self):
- """See unittest.TestCase.setUp for full specification.
+ def setUp(self):
+ """See unittest.TestCase.setUp for full specification.
Overriding implementations must call this implementation.
"""
- self._control = test_control.PauseFailControl()
- self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)
- self._digest = _digest.digest(
- _stock_service.STOCK_TEST_SERVICE, self._control, self._digest_pool)
+ self._control = test_control.PauseFailControl()
+ self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)
+ self._digest = _digest.digest(_stock_service.STOCK_TEST_SERVICE,
+ self._control, self._digest_pool)
- generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
- self._digest.methods, self._digest.event_method_implementations, None)
- self._invoker = self.invoker_constructor.construct_invoker(
- generic_stub, dynamic_stubs, self._digest.methods)
+ generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
+ self._digest.methods, self._digest.event_method_implementations,
+ None)
+ self._invoker = self.invoker_constructor.construct_invoker(
+ generic_stub, dynamic_stubs, self._digest.methods)
- def tearDown(self):
- """See unittest.TestCase.tearDown for full specification.
+ def tearDown(self):
+ """See unittest.TestCase.tearDown for full specification.
Overriding implementations must call this implementation.
"""
- self._invoker = None
- self.implementation.destantiate(self._memo)
- self._digest_pool.shutdown(wait=True)
-
- def testSuccessfulUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
- callback = _Callback()
-
- response_future = self._invoker.future(group, method)(
- request, test_constants.LONG_TIMEOUT)
- response_future.add_done_callback(callback)
- response = response_future.result()
-
- test_messages.verify(request, response, self)
- self.assertIs(callback.future(), response_future)
- self.assertIsNone(response_future.exception())
- self.assertIsNone(response_future.traceback())
-
- def testSuccessfulUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- response_iterator = self._invoker.future(group, method)(
- request, test_constants.LONG_TIMEOUT)
- responses = list(response_iterator)
-
- test_messages.verify(request, responses, self)
-
- def testSuccessfulStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
- request_iterator = _PauseableIterator(iter(requests))
- callback = _Callback()
-
- # Use of a paused iterator of requests allows us to test that control is
- # returned to calling code before the iterator yields any requests.
- with request_iterator.pause():
- response_future = self._invoker.future(group, method)(
- request_iterator, test_constants.LONG_TIMEOUT)
- response_future.add_done_callback(callback)
- future_passed_to_callback = callback.future()
- response = future_passed_to_callback.result()
-
- test_messages.verify(requests, response, self)
- self.assertIs(future_passed_to_callback, response_future)
- self.assertIsNone(response_future.exception())
- self.assertIsNone(response_future.traceback())
-
- def testSuccessfulStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
- request_iterator = _PauseableIterator(iter(requests))
-
- # Use of a paused iterator of requests allows us to test that control is
- # returned to calling code before the iterator yields any requests.
- with request_iterator.pause():
- response_iterator = self._invoker.future(group, method)(
- request_iterator, test_constants.LONG_TIMEOUT)
- responses = list(response_iterator)
-
- test_messages.verify(requests, responses, self)
-
- def testSequentialInvocations(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- first_request = test_messages.request()
- second_request = test_messages.request()
-
- first_response_future = self._invoker.future(group, method)(
- first_request, test_constants.LONG_TIMEOUT)
- first_response = first_response_future.result()
-
- test_messages.verify(first_request, first_response, self)
-
- second_response_future = self._invoker.future(group, method)(
- second_request, test_constants.LONG_TIMEOUT)
- second_response = second_response_future.result()
-
- test_messages.verify(second_request, second_response, self)
-
- def testParallelInvocations(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- first_request = test_messages.request()
- second_request = test_messages.request()
-
- first_response_future = self._invoker.future(group, method)(
- first_request, test_constants.LONG_TIMEOUT)
- second_response_future = self._invoker.future(group, method)(
- second_request, test_constants.LONG_TIMEOUT)
- first_response = first_response_future.result()
- second_response = second_response_future.result()
-
- test_messages.verify(first_request, first_response, self)
- test_messages.verify(second_request, second_response, self)
-
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = []
- response_futures = []
- for _ in range(test_constants.THREAD_CONCURRENCY):
- request = test_messages.request()
- response_future = self._invoker.future(group, method)(
- request, test_constants.LONG_TIMEOUT)
- requests.append(request)
- response_futures.append(response_future)
-
- responses = [
- response_future.result() for response_future in response_futures]
-
- for request, response in zip(requests, responses):
- test_messages.verify(request, response, self)
-
- def testWaitingForSomeButNotAllParallelInvocations(self):
- pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = []
- response_futures_to_indices = {}
- for index in range(test_constants.THREAD_CONCURRENCY):
- request = test_messages.request()
- inner_response_future = self._invoker.future(group, method)(
- request, test_constants.LONG_TIMEOUT)
- outer_response_future = pool.submit(inner_response_future.result)
- requests.append(request)
- response_futures_to_indices[outer_response_future] = index
-
- some_completed_response_futures_iterator = itertools.islice(
- futures.as_completed(response_futures_to_indices),
- test_constants.THREAD_CONCURRENCY // 2)
- for response_future in some_completed_response_futures_iterator:
- index = response_futures_to_indices[response_future]
- test_messages.verify(requests[index], response_future.result(), self)
- pool.shutdown(wait=True)
-
- def testCancelledUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
- callback = _Callback()
-
- with self._control.pause():
- response_future = self._invoker.future(group, method)(
- request, test_constants.LONG_TIMEOUT)
- response_future.add_done_callback(callback)
- cancel_method_return_value = response_future.cancel()
-
- self.assertIs(callback.future(), response_future)
- self.assertFalse(cancel_method_return_value)
- self.assertTrue(response_future.cancelled())
- with self.assertRaises(future.CancelledError):
- response_future.result()
- with self.assertRaises(future.CancelledError):
- response_future.exception()
- with self.assertRaises(future.CancelledError):
- response_future.traceback()
-
- def testCancelledUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- with self._control.pause():
- response_iterator = self._invoker.future(group, method)(
- request, test_constants.LONG_TIMEOUT)
- response_iterator.cancel()
-
- with self.assertRaises(face.CancellationError):
- next(response_iterator)
-
- def testCancelledStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
- callback = _Callback()
-
- with self._control.pause():
- response_future = self._invoker.future(group, method)(
- iter(requests), test_constants.LONG_TIMEOUT)
- response_future.add_done_callback(callback)
- cancel_method_return_value = response_future.cancel()
-
- self.assertIs(callback.future(), response_future)
- self.assertFalse(cancel_method_return_value)
- self.assertTrue(response_future.cancelled())
- with self.assertRaises(future.CancelledError):
- response_future.result()
- with self.assertRaises(future.CancelledError):
- response_future.exception()
- with self.assertRaises(future.CancelledError):
- response_future.traceback()
-
- def testCancelledStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- with self._control.pause():
- response_iterator = self._invoker.future(group, method)(
- iter(requests), test_constants.LONG_TIMEOUT)
- response_iterator.cancel()
-
- with self.assertRaises(face.CancellationError):
- next(response_iterator)
-
- def testExpiredUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
- callback = _Callback()
-
- with self._control.pause():
- response_future = self._invoker.future(
- group, method)(request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
- response_future.add_done_callback(callback)
- self.assertIs(callback.future(), response_future)
- self.assertIsInstance(
- response_future.exception(), face.ExpirationError)
- with self.assertRaises(face.ExpirationError):
- response_future.result()
- self.assertIsInstance(
- response_future.exception(), face.AbortionError)
- self.assertIsNotNone(response_future.traceback())
-
- def testExpiredUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- with self._control.pause():
- response_iterator = self._invoker.future(group, method)(
- request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
- with self.assertRaises(face.ExpirationError):
- list(response_iterator)
-
- def testExpiredStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
- callback = _Callback()
-
- with self._control.pause():
- response_future = self._invoker.future(group, method)(
- iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
- response_future.add_done_callback(callback)
- self.assertIs(callback.future(), response_future)
- self.assertIsInstance(
- response_future.exception(), face.ExpirationError)
- with self.assertRaises(face.ExpirationError):
- response_future.result()
- self.assertIsInstance(
- response_future.exception(), face.AbortionError)
- self.assertIsNotNone(response_future.traceback())
-
- def testExpiredStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- with self._control.pause():
- response_iterator = self._invoker.future(group, method)(
- iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
- with self.assertRaises(face.ExpirationError):
- list(response_iterator)
-
- def testFailedUnaryRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
- callback = _Callback()
- abortion_callback = _Callback()
-
- with self._control.fail():
- response_future = self._invoker.future(group, method)(
- request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
- response_future.add_done_callback(callback)
- response_future.add_abortion_callback(abortion_callback)
-
- self.assertIs(callback.future(), response_future)
- # Because the servicer fails outside of the thread from which the
- # servicer-side runtime called into it its failure is
- # indistinguishable from simply not having called its
- # response_callback before the expiration of the RPC.
- self.assertIsInstance(
- response_future.exception(), face.ExpirationError)
- with self.assertRaises(face.ExpirationError):
- response_future.result()
- self.assertIsNotNone(response_future.traceback())
- self.assertIsNotNone(abortion_callback.future())
-
- def testFailedUnaryRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.unary_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- request = test_messages.request()
-
- # Because the servicer fails outside of the thread from which the
- # servicer-side runtime called into it its failure is indistinguishable
- # from simply not having called its response_consumer before the
- # expiration of the RPC.
- with self._control.fail(), self.assertRaises(face.ExpirationError):
- response_iterator = self._invoker.future(group, method)(
- request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
- list(response_iterator)
-
- def testFailedStreamRequestUnaryResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_unary_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
- callback = _Callback()
- abortion_callback = _Callback()
-
- with self._control.fail():
- response_future = self._invoker.future(group, method)(
- iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
- response_future.add_done_callback(callback)
- response_future.add_abortion_callback(abortion_callback)
-
- self.assertIs(callback.future(), response_future)
- # Because the servicer fails outside of the thread from which the
- # servicer-side runtime called into it its failure is
- # indistinguishable from simply not having called its
- # response_callback before the expiration of the RPC.
- self.assertIsInstance(
- response_future.exception(), face.ExpirationError)
- with self.assertRaises(face.ExpirationError):
- response_future.result()
- self.assertIsNotNone(response_future.traceback())
- self.assertIsNotNone(abortion_callback.future())
-
- def testFailedStreamRequestStreamResponse(self):
- for (group, method), test_messages_sequence in (
- six.iteritems(self._digest.stream_stream_messages_sequences)):
- for test_messages in test_messages_sequence:
- requests = test_messages.requests()
-
- # Because the servicer fails outside of the thread from which the
- # servicer-side runtime called into it its failure is indistinguishable
- # from simply not having called its response_consumer before the
- # expiration of the RPC.
- with self._control.fail(), self.assertRaises(face.ExpirationError):
- response_iterator = self._invoker.future(group, method)(
- iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
- list(response_iterator)
+ self._invoker = None
+ self.implementation.destantiate(self._memo)
+ self._digest_pool.shutdown(wait=True)
+
+ def testSuccessfulUnaryRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+ callback = _Callback()
+
+ response_future = self._invoker.future(group, method)(
+ request, test_constants.LONG_TIMEOUT)
+ response_future.add_done_callback(callback)
+ response = response_future.result()
+
+ test_messages.verify(request, response, self)
+ self.assertIs(callback.future(), response_future)
+ self.assertIsNone(response_future.exception())
+ self.assertIsNone(response_future.traceback())
+
+ def testSuccessfulUnaryRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ response_iterator = self._invoker.future(group, method)(
+ request, test_constants.LONG_TIMEOUT)
+ responses = list(response_iterator)
+
+ test_messages.verify(request, responses, self)
+
+ def testSuccessfulStreamRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+ request_iterator = _PauseableIterator(iter(requests))
+ callback = _Callback()
+
+ # Use of a paused iterator of requests allows us to test that control is
+ # returned to calling code before the iterator yields any requests.
+ with request_iterator.pause():
+ response_future = self._invoker.future(group, method)(
+ request_iterator, test_constants.LONG_TIMEOUT)
+ response_future.add_done_callback(callback)
+ future_passed_to_callback = callback.future()
+ response = future_passed_to_callback.result()
+
+ test_messages.verify(requests, response, self)
+ self.assertIs(future_passed_to_callback, response_future)
+ self.assertIsNone(response_future.exception())
+ self.assertIsNone(response_future.traceback())
+
+ def testSuccessfulStreamRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+ request_iterator = _PauseableIterator(iter(requests))
+
+ # Use of a paused iterator of requests allows us to test that control is
+ # returned to calling code before the iterator yields any requests.
+ with request_iterator.pause():
+ response_iterator = self._invoker.future(group, method)(
+ request_iterator, test_constants.LONG_TIMEOUT)
+ responses = list(response_iterator)
+
+ test_messages.verify(requests, responses, self)
+
+ def testSequentialInvocations(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ first_request = test_messages.request()
+ second_request = test_messages.request()
+
+ first_response_future = self._invoker.future(group, method)(
+ first_request, test_constants.LONG_TIMEOUT)
+ first_response = first_response_future.result()
+
+ test_messages.verify(first_request, first_response, self)
+
+ second_response_future = self._invoker.future(group, method)(
+ second_request, test_constants.LONG_TIMEOUT)
+ second_response = second_response_future.result()
+
+ test_messages.verify(second_request, second_response, self)
+
+ def testParallelInvocations(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ first_request = test_messages.request()
+ second_request = test_messages.request()
+
+ first_response_future = self._invoker.future(group, method)(
+ first_request, test_constants.LONG_TIMEOUT)
+ second_response_future = self._invoker.future(group, method)(
+ second_request, test_constants.LONG_TIMEOUT)
+ first_response = first_response_future.result()
+ second_response = second_response_future.result()
+
+ test_messages.verify(first_request, first_response, self)
+ test_messages.verify(second_request, second_response, self)
+
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = []
+ response_futures = []
+ for _ in range(test_constants.THREAD_CONCURRENCY):
+ request = test_messages.request()
+ response_future = self._invoker.future(group, method)(
+ request, test_constants.LONG_TIMEOUT)
+ requests.append(request)
+ response_futures.append(response_future)
+
+ responses = [
+ response_future.result()
+ for response_future in response_futures
+ ]
+
+ for request, response in zip(requests, responses):
+ test_messages.verify(request, response, self)
+
+ def testWaitingForSomeButNotAllParallelInvocations(self):
+ pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = []
+ response_futures_to_indices = {}
+ for index in range(test_constants.THREAD_CONCURRENCY):
+ request = test_messages.request()
+ inner_response_future = self._invoker.future(group, method)(
+ request, test_constants.LONG_TIMEOUT)
+ outer_response_future = pool.submit(
+ inner_response_future.result)
+ requests.append(request)
+ response_futures_to_indices[outer_response_future] = index
+
+ some_completed_response_futures_iterator = itertools.islice(
+ futures.as_completed(response_futures_to_indices),
+ test_constants.THREAD_CONCURRENCY // 2)
+ for response_future in some_completed_response_futures_iterator:
+ index = response_futures_to_indices[response_future]
+ test_messages.verify(requests[index],
+ response_future.result(), self)
+ pool.shutdown(wait=True)
+
+ def testCancelledUnaryRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+ callback = _Callback()
+
+ with self._control.pause():
+ response_future = self._invoker.future(group, method)(
+ request, test_constants.LONG_TIMEOUT)
+ response_future.add_done_callback(callback)
+ cancel_method_return_value = response_future.cancel()
+
+ self.assertIs(callback.future(), response_future)
+ self.assertFalse(cancel_method_return_value)
+ self.assertTrue(response_future.cancelled())
+ with self.assertRaises(future.CancelledError):
+ response_future.result()
+ with self.assertRaises(future.CancelledError):
+ response_future.exception()
+ with self.assertRaises(future.CancelledError):
+ response_future.traceback()
+
+ def testCancelledUnaryRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self._control.pause():
+ response_iterator = self._invoker.future(group, method)(
+ request, test_constants.LONG_TIMEOUT)
+ response_iterator.cancel()
+
+ with self.assertRaises(face.CancellationError):
+ next(response_iterator)
+
+ def testCancelledStreamRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+ callback = _Callback()
+
+ with self._control.pause():
+ response_future = self._invoker.future(group, method)(
+ iter(requests), test_constants.LONG_TIMEOUT)
+ response_future.add_done_callback(callback)
+ cancel_method_return_value = response_future.cancel()
+
+ self.assertIs(callback.future(), response_future)
+ self.assertFalse(cancel_method_return_value)
+ self.assertTrue(response_future.cancelled())
+ with self.assertRaises(future.CancelledError):
+ response_future.result()
+ with self.assertRaises(future.CancelledError):
+ response_future.exception()
+ with self.assertRaises(future.CancelledError):
+ response_future.traceback()
+
+ def testCancelledStreamRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self._control.pause():
+ response_iterator = self._invoker.future(group, method)(
+ iter(requests), test_constants.LONG_TIMEOUT)
+ response_iterator.cancel()
+
+ with self.assertRaises(face.CancellationError):
+ next(response_iterator)
+
+ def testExpiredUnaryRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+ callback = _Callback()
+
+ with self._control.pause():
+ response_future = self._invoker.future(group, method)(
+ request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
+ response_future.add_done_callback(callback)
+ self.assertIs(callback.future(), response_future)
+ self.assertIsInstance(response_future.exception(),
+ face.ExpirationError)
+ with self.assertRaises(face.ExpirationError):
+ response_future.result()
+ self.assertIsInstance(response_future.exception(),
+ face.AbortionError)
+ self.assertIsNotNone(response_future.traceback())
+
+ def testExpiredUnaryRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ with self._control.pause():
+ response_iterator = self._invoker.future(group, method)(
+ request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
+ with self.assertRaises(face.ExpirationError):
+ list(response_iterator)
+
+ def testExpiredStreamRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+ callback = _Callback()
+
+ with self._control.pause():
+ response_future = self._invoker.future(group, method)(
+ iter(requests),
+ _3069_test_constant.REALLY_SHORT_TIMEOUT)
+ response_future.add_done_callback(callback)
+ self.assertIs(callback.future(), response_future)
+ self.assertIsInstance(response_future.exception(),
+ face.ExpirationError)
+ with self.assertRaises(face.ExpirationError):
+ response_future.result()
+ self.assertIsInstance(response_future.exception(),
+ face.AbortionError)
+ self.assertIsNotNone(response_future.traceback())
+
+ def testExpiredStreamRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ with self._control.pause():
+ response_iterator = self._invoker.future(group, method)(
+ iter(requests),
+ _3069_test_constant.REALLY_SHORT_TIMEOUT)
+ with self.assertRaises(face.ExpirationError):
+ list(response_iterator)
+
+ def testFailedUnaryRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+ callback = _Callback()
+ abortion_callback = _Callback()
+
+ with self._control.fail():
+ response_future = self._invoker.future(group, method)(
+ request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
+ response_future.add_done_callback(callback)
+ response_future.add_abortion_callback(abortion_callback)
+
+ self.assertIs(callback.future(), response_future)
+ # Because the servicer fails outside of the thread from which the
+                # servicer-side runtime called into it, its failure is
+ # indistinguishable from simply not having called its
+ # response_callback before the expiration of the RPC.
+ self.assertIsInstance(response_future.exception(),
+ face.ExpirationError)
+ with self.assertRaises(face.ExpirationError):
+ response_future.result()
+ self.assertIsNotNone(response_future.traceback())
+ self.assertIsNotNone(abortion_callback.future())
+
+ def testFailedUnaryRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.unary_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ request = test_messages.request()
+
+ # Because the servicer fails outside of the thread from which the
+            # servicer-side runtime called into it, its failure is indistinguishable
+ # from simply not having called its response_consumer before the
+ # expiration of the RPC.
+ with self._control.fail(), self.assertRaises(
+ face.ExpirationError):
+ response_iterator = self._invoker.future(group, method)(
+ request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
+ list(response_iterator)
+
+ def testFailedStreamRequestUnaryResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_unary_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+ callback = _Callback()
+ abortion_callback = _Callback()
+
+ with self._control.fail():
+ response_future = self._invoker.future(group, method)(
+ iter(requests),
+ _3069_test_constant.REALLY_SHORT_TIMEOUT)
+ response_future.add_done_callback(callback)
+ response_future.add_abortion_callback(abortion_callback)
+
+ self.assertIs(callback.future(), response_future)
+ # Because the servicer fails outside of the thread from which the
+                # servicer-side runtime called into it, its failure is
+ # indistinguishable from simply not having called its
+ # response_callback before the expiration of the RPC.
+ self.assertIsInstance(response_future.exception(),
+ face.ExpirationError)
+ with self.assertRaises(face.ExpirationError):
+ response_future.result()
+ self.assertIsNotNone(response_future.traceback())
+ self.assertIsNotNone(abortion_callback.future())
+
+ def testFailedStreamRequestStreamResponse(self):
+ for (group, method), test_messages_sequence in (
+ six.iteritems(self._digest.stream_stream_messages_sequences)):
+ for test_messages in test_messages_sequence:
+ requests = test_messages.requests()
+
+ # Because the servicer fails outside of the thread from which the
+            # servicer-side runtime called into it, its failure is indistinguishable
+ # from simply not having called its response_consumer before the
+ # expiration of the RPC.
+ with self._control.fail(), self.assertRaises(
+ face.ExpirationError):
+ response_iterator = self._invoker.future(group, method)(
+ iter(requests),
+ _3069_test_constant.REALLY_SHORT_TIMEOUT)
+ list(response_iterator)
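
The _Callback helper at the top of this file is the synchronization point for every done-callback assertion above: the test thread blocks in future() until the RPC runtime invokes the callback from another thread. A minimal, self-contained sketch of that condition-variable hand-off, using only the standard library (the class and names below are illustrative stand-ins, not code from this diff):

import threading

class LatchingCallback(object):
    """Records the single value it is called with and wakes any waiter."""

    def __init__(self):
        self._condition = threading.Condition()
        self._called = False
        self._value = None

    def __call__(self, value):
        # Invoked by the library under test, typically on another thread.
        with self._condition:
            self._called = True
            self._value = value
            self._condition.notify_all()

    def value(self):
        # Block the calling (test) thread until __call__ has run somewhere.
        with self._condition:
            while not self._called:
                self._condition.wait()
            return self._value

callback = LatchingCallback()
threading.Timer(0.1, callback, args=('done',)).start()
assert callback.value() == 'done'

Looping on the predicate before waiting guards against spurious wakeups, which is why the waiter re-checks its state each time the condition is signalled.
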
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py
index ac487bed4f..4e144a3635 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Coverage across the Face layer's generic-to-dynamic range for invocation."""
import abc
@@ -65,149 +64,149 @@ _CARDINALITY_TO_MULTI_CALLABLE_ATTRIBUTE = {
class Invoker(six.with_metaclass(abc.ABCMeta)):
- """A type used to invoke test RPCs."""
+ """A type used to invoke test RPCs."""
- @abc.abstractmethod
- def blocking(self, group, name):
- """Invokes an RPC with blocking control flow."""
- raise NotImplementedError()
+ @abc.abstractmethod
+ def blocking(self, group, name):
+ """Invokes an RPC with blocking control flow."""
+ raise NotImplementedError()
- @abc.abstractmethod
- def future(self, group, name):
- """Invokes an RPC with future control flow."""
- raise NotImplementedError()
+ @abc.abstractmethod
+ def future(self, group, name):
+ """Invokes an RPC with future control flow."""
+ raise NotImplementedError()
- @abc.abstractmethod
- def event(self, group, name):
- """Invokes an RPC with event control flow."""
- raise NotImplementedError()
+ @abc.abstractmethod
+ def event(self, group, name):
+ """Invokes an RPC with event control flow."""
+ raise NotImplementedError()
class InvokerConstructor(six.with_metaclass(abc.ABCMeta)):
- """A type used to create Invokers."""
+ """A type used to create Invokers."""
- @abc.abstractmethod
- def name(self):
- """Specifies the name of the Invoker constructed by this object."""
- raise NotImplementedError()
+ @abc.abstractmethod
+ def name(self):
+ """Specifies the name of the Invoker constructed by this object."""
+ raise NotImplementedError()
- @abc.abstractmethod
- def construct_invoker(self, generic_stub, dynamic_stubs, methods):
- """Constructs an Invoker for the given stubs and methods."""
- raise NotImplementedError()
+ @abc.abstractmethod
+ def construct_invoker(self, generic_stub, dynamic_stubs, methods):
+ """Constructs an Invoker for the given stubs and methods."""
+ raise NotImplementedError()
class _GenericInvoker(Invoker):
- def __init__(self, generic_stub, methods):
- self._stub = generic_stub
- self._methods = methods
+ def __init__(self, generic_stub, methods):
+ self._stub = generic_stub
+ self._methods = methods
- def _behavior(self, group, name, cardinality_to_generic_method):
- method_cardinality = self._methods[group, name].cardinality()
- behavior = getattr(
- self._stub, cardinality_to_generic_method[method_cardinality])
- return lambda *args, **kwargs: behavior(group, name, *args, **kwargs)
+ def _behavior(self, group, name, cardinality_to_generic_method):
+ method_cardinality = self._methods[group, name].cardinality()
+ behavior = getattr(self._stub,
+ cardinality_to_generic_method[method_cardinality])
+ return lambda *args, **kwargs: behavior(group, name, *args, **kwargs)
- def blocking(self, group, name):
- return self._behavior(
- group, name, _CARDINALITY_TO_GENERIC_BLOCKING_BEHAVIOR)
+ def blocking(self, group, name):
+ return self._behavior(group, name,
+ _CARDINALITY_TO_GENERIC_BLOCKING_BEHAVIOR)
- def future(self, group, name):
- return self._behavior(group, name, _CARDINALITY_TO_GENERIC_FUTURE_BEHAVIOR)
+ def future(self, group, name):
+ return self._behavior(group, name,
+ _CARDINALITY_TO_GENERIC_FUTURE_BEHAVIOR)
- def event(self, group, name):
- return self._behavior(group, name, _CARDINALITY_TO_GENERIC_EVENT_BEHAVIOR)
+ def event(self, group, name):
+ return self._behavior(group, name,
+ _CARDINALITY_TO_GENERIC_EVENT_BEHAVIOR)
class _GenericInvokerConstructor(InvokerConstructor):
- def name(self):
- return 'GenericInvoker'
+ def name(self):
+ return 'GenericInvoker'
- def construct_invoker(self, generic_stub, dynamic_stub, methods):
- return _GenericInvoker(generic_stub, methods)
+ def construct_invoker(self, generic_stub, dynamic_stub, methods):
+ return _GenericInvoker(generic_stub, methods)
class _MultiCallableInvoker(Invoker):
- def __init__(self, generic_stub, methods):
- self._stub = generic_stub
- self._methods = methods
+ def __init__(self, generic_stub, methods):
+ self._stub = generic_stub
+ self._methods = methods
- def _multi_callable(self, group, name):
- method_cardinality = self._methods[group, name].cardinality()
- behavior = getattr(
- self._stub,
- _CARDINALITY_TO_MULTI_CALLABLE_ATTRIBUTE[method_cardinality])
- return behavior(group, name)
+ def _multi_callable(self, group, name):
+ method_cardinality = self._methods[group, name].cardinality()
+ behavior = getattr(
+ self._stub,
+ _CARDINALITY_TO_MULTI_CALLABLE_ATTRIBUTE[method_cardinality])
+ return behavior(group, name)
- def blocking(self, group, name):
- return self._multi_callable(group, name)
+ def blocking(self, group, name):
+ return self._multi_callable(group, name)
- def future(self, group, name):
- method_cardinality = self._methods[group, name].cardinality()
- behavior = getattr(
- self._stub,
- _CARDINALITY_TO_MULTI_CALLABLE_ATTRIBUTE[method_cardinality])
- if method_cardinality in (
- cardinality.Cardinality.UNARY_UNARY,
- cardinality.Cardinality.STREAM_UNARY):
- return behavior(group, name).future
- else:
- return behavior(group, name)
+ def future(self, group, name):
+ method_cardinality = self._methods[group, name].cardinality()
+ behavior = getattr(
+ self._stub,
+ _CARDINALITY_TO_MULTI_CALLABLE_ATTRIBUTE[method_cardinality])
+ if method_cardinality in (cardinality.Cardinality.UNARY_UNARY,
+ cardinality.Cardinality.STREAM_UNARY):
+ return behavior(group, name).future
+ else:
+ return behavior(group, name)
- def event(self, group, name):
- return self._multi_callable(group, name).event
+ def event(self, group, name):
+ return self._multi_callable(group, name).event
class _MultiCallableInvokerConstructor(InvokerConstructor):
- def name(self):
- return 'MultiCallableInvoker'
+ def name(self):
+ return 'MultiCallableInvoker'
- def construct_invoker(self, generic_stub, dynamic_stub, methods):
- return _MultiCallableInvoker(generic_stub, methods)
+ def construct_invoker(self, generic_stub, dynamic_stub, methods):
+ return _MultiCallableInvoker(generic_stub, methods)
class _DynamicInvoker(Invoker):
- def __init__(self, dynamic_stubs, methods):
- self._stubs = dynamic_stubs
- self._methods = methods
+ def __init__(self, dynamic_stubs, methods):
+ self._stubs = dynamic_stubs
+ self._methods = methods
- def blocking(self, group, name):
- return getattr(self._stubs[group], name)
+ def blocking(self, group, name):
+ return getattr(self._stubs[group], name)
- def future(self, group, name):
- if self._methods[group, name].cardinality() in (
- cardinality.Cardinality.UNARY_UNARY,
- cardinality.Cardinality.STREAM_UNARY):
- return getattr(self._stubs[group], name).future
- else:
- return getattr(self._stubs[group], name)
+ def future(self, group, name):
+ if self._methods[group, name].cardinality() in (
+ cardinality.Cardinality.UNARY_UNARY,
+ cardinality.Cardinality.STREAM_UNARY):
+ return getattr(self._stubs[group], name).future
+ else:
+ return getattr(self._stubs[group], name)
- def event(self, group, name):
- return getattr(self._stubs[group], name).event
+ def event(self, group, name):
+ return getattr(self._stubs[group], name).event
class _DynamicInvokerConstructor(InvokerConstructor):
- def name(self):
- return 'DynamicInvoker'
+ def name(self):
+ return 'DynamicInvoker'
- def construct_invoker(self, generic_stub, dynamic_stubs, methods):
- return _DynamicInvoker(dynamic_stubs, methods)
+ def construct_invoker(self, generic_stub, dynamic_stubs, methods):
+ return _DynamicInvoker(dynamic_stubs, methods)
def invoker_constructors():
- """Creates a sequence of InvokerConstructors to use in tests of RPCs.
+ """Creates a sequence of InvokerConstructors to use in tests of RPCs.
Returns:
A sequence of InvokerConstructors.
"""
- return (
- _GenericInvokerConstructor(),
- _MultiCallableInvokerConstructor(),
- _DynamicInvokerConstructor(),
- )
+ return (
+ _GenericInvokerConstructor(),
+ _MultiCallableInvokerConstructor(),
+ _DynamicInvokerConstructor(),)
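
All three invokers above reduce to the same dispatch idiom: map the method's cardinality to an attribute name, getattr it off the stub, and bind the group and method name. A toy, self-contained illustration of that idiom follows; the fake stub, attribute names, and cardinality enum are stand-ins, not the gRPC types and not part of this change:

import enum

class Cardinality(enum.Enum):
    UNARY_UNARY = 'unary-unary'
    STREAM_STREAM = 'stream-stream'

# A lookup table from cardinality to the stub attribute that serves it,
# in the spirit of _CARDINALITY_TO_GENERIC_BLOCKING_BEHAVIOR above.
_BEHAVIOR_ATTRIBUTE = {
    Cardinality.UNARY_UNARY: 'blocking_unary_unary',
    Cardinality.STREAM_STREAM: 'stream_stream',
}

class FakeStub(object):

    def blocking_unary_unary(self, group, name, request):
        return 'response to %r from %s.%s' % (request, group, name)

    def stream_stream(self, group, name, requests):
        return ['echo %r' % request for request in requests]

def behavior(stub, methods, group, name):
    # Pick the stub attribute for this method's cardinality and close over
    # the group/name pair, mirroring the pattern of _GenericInvoker._behavior.
    attribute = _BEHAVIOR_ATTRIBUTE[methods[group, name]]
    raw_behavior = getattr(stub, attribute)
    return lambda *args, **kwargs: raw_behavior(group, name, *args, **kwargs)

methods = {('Stock', 'GetLastTradePrice'): Cardinality.UNARY_UNARY}
call = behavior(FakeStub(), methods, 'Stock', 'GetLastTradePrice')
print(call('a request'))
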
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py
index f13dff0558..f14ac6a987 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Private interfaces implemented by data sets used in Face-layer tests."""
import abc
@@ -38,12 +37,13 @@ from grpc.framework.interfaces.face import face # pylint: disable=unused-import
from tests.unit.framework.interfaces.face import test_interfaces
-class UnaryUnaryTestMethodImplementation(six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
- """A controllable implementation of a unary-unary method."""
+class UnaryUnaryTestMethodImplementation(
+ six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
+ """A controllable implementation of a unary-unary method."""
- @abc.abstractmethod
- def service(self, request, response_callback, context, control):
- """Services an RPC that accepts one message and produces one message.
+ @abc.abstractmethod
+ def service(self, request, response_callback, context, control):
+ """Services an RPC that accepts one message and produces one message.
Args:
request: The single request message for the RPC.
@@ -56,15 +56,15 @@ class UnaryUnaryTestMethodImplementation(six.with_metaclass(abc.ABCMeta, test_in
abandonment.Abandoned: May or may not be raised when the RPC has been
aborted.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class UnaryUnaryTestMessages(six.with_metaclass(abc.ABCMeta)):
- """A type for unary-request-unary-response message pairings."""
+ """A type for unary-request-unary-response message pairings."""
- @abc.abstractmethod
- def request(self):
- """Affords a request message.
+ @abc.abstractmethod
+ def request(self):
+ """Affords a request message.
Implementations of this method should return a different message with each
call so that multiple test executions of the test method may be made with
@@ -73,11 +73,11 @@ class UnaryUnaryTestMessages(six.with_metaclass(abc.ABCMeta)):
Returns:
A request message.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def verify(self, request, response, test_case):
- """Verifies that the computed response matches the given request.
+ @abc.abstractmethod
+ def verify(self, request, response, test_case):
+ """Verifies that the computed response matches the given request.
Args:
request: A request message.
@@ -88,15 +88,16 @@ class UnaryUnaryTestMessages(six.with_metaclass(abc.ABCMeta)):
AssertionError: If the request and response do not match, indicating that
there was some problem executing the RPC under test.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
-class UnaryStreamTestMethodImplementation(six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
- """A controllable implementation of a unary-stream method."""
+class UnaryStreamTestMethodImplementation(
+ six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
+ """A controllable implementation of a unary-stream method."""
- @abc.abstractmethod
- def service(self, request, response_consumer, context, control):
- """Services an RPC that takes one message and produces a stream of messages.
+ @abc.abstractmethod
+ def service(self, request, response_consumer, context, control):
+ """Services an RPC that takes one message and produces a stream of messages.
Args:
request: The single request message for the RPC.
@@ -109,15 +110,15 @@ class UnaryStreamTestMethodImplementation(six.with_metaclass(abc.ABCMeta, test_i
abandonment.Abandoned: May or may not be raised when the RPC has been
aborted.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class UnaryStreamTestMessages(six.with_metaclass(abc.ABCMeta)):
- """A type for unary-request-stream-response message pairings."""
+ """A type for unary-request-stream-response message pairings."""
- @abc.abstractmethod
- def request(self):
- """Affords a request message.
+ @abc.abstractmethod
+ def request(self):
+ """Affords a request message.
Implementations of this method should return a different message with each
call so that multiple test executions of the test method may be made with
@@ -126,11 +127,11 @@ class UnaryStreamTestMessages(six.with_metaclass(abc.ABCMeta)):
Returns:
A request message.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def verify(self, request, responses, test_case):
- """Verifies that the computed responses match the given request.
+ @abc.abstractmethod
+ def verify(self, request, responses, test_case):
+ """Verifies that the computed responses match the given request.
Args:
request: A request message.
@@ -141,15 +142,16 @@ class UnaryStreamTestMessages(six.with_metaclass(abc.ABCMeta)):
AssertionError: If the request and responses do not match, indicating that
there was some problem executing the RPC under test.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
-class StreamUnaryTestMethodImplementation(six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
- """A controllable implementation of a stream-unary method."""
+class StreamUnaryTestMethodImplementation(
+ six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
+ """A controllable implementation of a stream-unary method."""
- @abc.abstractmethod
- def service(self, response_callback, context, control):
- """Services an RPC that takes a stream of messages and produces one message.
+ @abc.abstractmethod
+ def service(self, response_callback, context, control):
+ """Services an RPC that takes a stream of messages and produces one message.
Args:
response_callback: A callback to be called to accept the response message
@@ -169,15 +171,15 @@ class StreamUnaryTestMethodImplementation(six.with_metaclass(abc.ABCMeta, test_i
abandonment.Abandoned: May or may not be raised when the RPC has been
aborted.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class StreamUnaryTestMessages(six.with_metaclass(abc.ABCMeta)):
- """A type for stream-request-unary-response message pairings."""
+ """A type for stream-request-unary-response message pairings."""
- @abc.abstractmethod
- def requests(self):
- """Affords a sequence of request messages.
+ @abc.abstractmethod
+ def requests(self):
+ """Affords a sequence of request messages.
Implementations of this method should return a different sequence with each
call so that multiple test executions of the test method may be made with
@@ -186,11 +188,11 @@ class StreamUnaryTestMessages(six.with_metaclass(abc.ABCMeta)):
Returns:
A sequence of request messages.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def verify(self, requests, response, test_case):
- """Verifies that the computed response matches the given requests.
+ @abc.abstractmethod
+ def verify(self, requests, response, test_case):
+ """Verifies that the computed response matches the given requests.
Args:
requests: A sequence of request messages.
@@ -201,15 +203,16 @@ class StreamUnaryTestMessages(six.with_metaclass(abc.ABCMeta)):
AssertionError: If the requests and response do not match, indicating that
there was some problem executing the RPC under test.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
-class StreamStreamTestMethodImplementation(six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
- """A controllable implementation of a stream-stream method."""
+class StreamStreamTestMethodImplementation(
+ six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
+ """A controllable implementation of a stream-stream method."""
- @abc.abstractmethod
- def service(self, response_consumer, context, control):
- """Services an RPC that accepts and produces streams of messages.
+ @abc.abstractmethod
+ def service(self, response_consumer, context, control):
+ """Services an RPC that accepts and produces streams of messages.
Args:
response_consumer: A stream.Consumer to be called to accept the response
@@ -229,15 +232,15 @@ class StreamStreamTestMethodImplementation(six.with_metaclass(abc.ABCMeta, test_
abandonment.Abandoned: May or may not be raised when the RPC has been
aborted.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class StreamStreamTestMessages(six.with_metaclass(abc.ABCMeta)):
- """A type for stream-request-stream-response message pairings."""
+ """A type for stream-request-stream-response message pairings."""
- @abc.abstractmethod
- def requests(self):
- """Affords a sequence of request messages.
+ @abc.abstractmethod
+ def requests(self):
+ """Affords a sequence of request messages.
Implementations of this method should return a different sequence with each
call so that multiple test executions of the test method may be made with
@@ -246,11 +249,11 @@ class StreamStreamTestMessages(six.with_metaclass(abc.ABCMeta)):
Returns:
A sequence of request messages.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def verify(self, requests, responses, test_case):
- """Verifies that the computed response matches the given requests.
+ @abc.abstractmethod
+ def verify(self, requests, responses, test_case):
+        """Verifies that the computed responses match the given requests.
Args:
requests: A sequence of request messages.
@@ -261,15 +264,15 @@ class StreamStreamTestMessages(six.with_metaclass(abc.ABCMeta)):
AssertionError: If the requests and responses do not match, indicating
that there was some problem executing the RPC under test.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class TestService(six.with_metaclass(abc.ABCMeta)):
- """A specification of implemented methods to use in tests."""
+ """A specification of implemented methods to use in tests."""
- @abc.abstractmethod
- def unary_unary_scenarios(self):
- """Affords unary-request-unary-response test methods and their messages.
+ @abc.abstractmethod
+ def unary_unary_scenarios(self):
+ """Affords unary-request-unary-response test methods and their messages.
Returns:
A dict from method group-name pair to implementation/messages pair. The
@@ -277,11 +280,11 @@ class TestService(six.with_metaclass(abc.ABCMeta)):
and the second element is a sequence of UnaryUnaryTestMethodMessages
objects.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def unary_stream_scenarios(self):
- """Affords unary-request-stream-response test methods and their messages.
+ @abc.abstractmethod
+ def unary_stream_scenarios(self):
+ """Affords unary-request-stream-response test methods and their messages.
Returns:
A dict from method group-name pair to implementation/messages pair. The
@@ -289,11 +292,11 @@ class TestService(six.with_metaclass(abc.ABCMeta)):
object and the second element is a sequence of
UnaryStreamTestMethodMessages objects.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def stream_unary_scenarios(self):
- """Affords stream-request-unary-response test methods and their messages.
+ @abc.abstractmethod
+ def stream_unary_scenarios(self):
+ """Affords stream-request-unary-response test methods and their messages.
Returns:
A dict from method group-name pair to implementation/messages pair. The
@@ -301,11 +304,11 @@ class TestService(six.with_metaclass(abc.ABCMeta)):
object and the second element is a sequence of
StreamUnaryTestMethodMessages objects.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def stream_stream_scenarios(self):
- """Affords stream-request-stream-response test methods and their messages.
+ @abc.abstractmethod
+ def stream_stream_scenarios(self):
+ """Affords stream-request-stream-response test methods and their messages.
Returns:
A dict from method group-name pair to implementation/messages pair. The
@@ -313,4 +316,4 @@ class TestService(six.with_metaclass(abc.ABCMeta)):
object and the second element is a sequence of
StreamStreamTestMethodMessages objects.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
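
The abstract pairings above only require request()/requests() to yield fresh messages and verify(...) to assert on a TestCase; the stock service in the next file is the real implementation. A self-contained sketch under simplified assumptions (plain strings instead of stock_pb2 messages, an echoing stand-in instead of a real RPC):

import unittest

class EchoMessages(object):
    """A toy UnaryUnaryTestMessages-style pairing using plain strings."""

    def __init__(self):
        self._index = 0

    def request(self):
        # A different message on each call, as the interface asks for.
        request = 'request-%d' % self._index
        self._index += 1
        return request

    def verify(self, request, response, test_case):
        # The toy "service" is assumed to echo its request back unchanged.
        test_case.assertEqual(request, response)

class EchoTest(unittest.TestCase):

    def test_echo(self):
        messages = EchoMessages()
        request = messages.request()
        response = request  # stand-in for invoking an RPC under test
        messages.verify(request, response, self)

if __name__ == '__main__':
    unittest.main()
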
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py
index 5299655bb3..41a55c13f4 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Examples of Python implementations of the stock.proto Stock service."""
from grpc.framework.common import cardinality
@@ -44,353 +43,363 @@ _price = lambda symbol_name: float(hash(symbol_name) % 4096)
def _get_last_trade_price(stock_request, stock_reply_callback, control, active):
- """A unary-request, unary-response test method."""
- control.control()
- if active():
- stock_reply_callback(
- stock_pb2.StockReply(
- symbol=stock_request.symbol, price=_price(stock_request.symbol)))
- else:
- raise abandonment.Abandoned()
-
-
-def _get_last_trade_price_multiple(stock_reply_consumer, control, active):
- """A stream-request, stream-response test method."""
- def stock_reply_for_stock_request(stock_request):
+ """A unary-request, unary-response test method."""
control.control()
if active():
- return stock_pb2.StockReply(
- symbol=stock_request.symbol, price=_price(stock_request.symbol))
+ stock_reply_callback(
+ stock_pb2.StockReply(
+ symbol=stock_request.symbol, price=_price(
+ stock_request.symbol)))
else:
- raise abandonment.Abandoned()
-
- class StockRequestConsumer(stream.Consumer):
+ raise abandonment.Abandoned()
- def consume(self, stock_request):
- stock_reply_consumer.consume(stock_reply_for_stock_request(stock_request))
- def terminate(self):
- control.control()
- stock_reply_consumer.terminate()
+def _get_last_trade_price_multiple(stock_reply_consumer, control, active):
+ """A stream-request, stream-response test method."""
- def consume_and_terminate(self, stock_request):
- stock_reply_consumer.consume_and_terminate(
- stock_reply_for_stock_request(stock_request))
+ def stock_reply_for_stock_request(stock_request):
+ control.control()
+ if active():
+ return stock_pb2.StockReply(
+ symbol=stock_request.symbol, price=_price(stock_request.symbol))
+ else:
+ raise abandonment.Abandoned()
- return StockRequestConsumer()
+ class StockRequestConsumer(stream.Consumer):
+ def consume(self, stock_request):
+ stock_reply_consumer.consume(
+ stock_reply_for_stock_request(stock_request))
-def _watch_future_trades(stock_request, stock_reply_consumer, control, active):
- """A unary-request, stream-response test method."""
- base_price = _price(stock_request.symbol)
- for index in range(stock_request.num_trades_to_watch):
- control.control()
- if active():
- stock_reply_consumer.consume(
- stock_pb2.StockReply(
- symbol=stock_request.symbol, price=base_price + index))
- else:
- raise abandonment.Abandoned()
- stock_reply_consumer.terminate()
+ def terminate(self):
+ control.control()
+ stock_reply_consumer.terminate()
+ def consume_and_terminate(self, stock_request):
+ stock_reply_consumer.consume_and_terminate(
+ stock_reply_for_stock_request(stock_request))
-def _get_highest_trade_price(stock_reply_callback, control, active):
- """A stream-request, unary-response test method."""
+ return StockRequestConsumer()
- class StockRequestConsumer(stream.Consumer):
- """Keeps an ongoing record of the most valuable symbol yet consumed."""
- def __init__(self):
- self._symbol = None
- self._price = None
-
- def consume(self, stock_request):
- control.control()
- if active():
- if self._price is None:
- self._symbol = stock_request.symbol
- self._price = _price(stock_request.symbol)
- else:
- candidate_price = _price(stock_request.symbol)
- if self._price < candidate_price:
- self._symbol = stock_request.symbol
- self._price = candidate_price
-
- def terminate(self):
- control.control()
- if active():
- if self._symbol is None:
- raise ValueError()
- else:
- stock_reply_callback(
- stock_pb2.StockReply(symbol=self._symbol, price=self._price))
- self._symbol = None
- self._price = None
-
- def consume_and_terminate(self, stock_request):
- control.control()
- if active():
- if self._price is None:
- stock_reply_callback(
- stock_pb2.StockReply(
- symbol=stock_request.symbol,
- price=_price(stock_request.symbol)))
- else:
- candidate_price = _price(stock_request.symbol)
- if self._price < candidate_price:
- stock_reply_callback(
- stock_pb2.StockReply(
- symbol=stock_request.symbol, price=candidate_price))
- else:
- stock_reply_callback(
+def _watch_future_trades(stock_request, stock_reply_consumer, control, active):
+ """A unary-request, stream-response test method."""
+ base_price = _price(stock_request.symbol)
+ for index in range(stock_request.num_trades_to_watch):
+ control.control()
+ if active():
+ stock_reply_consumer.consume(
stock_pb2.StockReply(
- symbol=self._symbol, price=self._price))
+ symbol=stock_request.symbol, price=base_price + index))
+ else:
+ raise abandonment.Abandoned()
+ stock_reply_consumer.terminate()
- self._symbol = None
- self._price = None
- return StockRequestConsumer()
+def _get_highest_trade_price(stock_reply_callback, control, active):
+ """A stream-request, unary-response test method."""
+
+ class StockRequestConsumer(stream.Consumer):
+ """Keeps an ongoing record of the most valuable symbol yet consumed."""
+
+ def __init__(self):
+ self._symbol = None
+ self._price = None
+
+ def consume(self, stock_request):
+ control.control()
+ if active():
+ if self._price is None:
+ self._symbol = stock_request.symbol
+ self._price = _price(stock_request.symbol)
+ else:
+ candidate_price = _price(stock_request.symbol)
+ if self._price < candidate_price:
+ self._symbol = stock_request.symbol
+ self._price = candidate_price
+
+ def terminate(self):
+ control.control()
+ if active():
+ if self._symbol is None:
+ raise ValueError()
+ else:
+ stock_reply_callback(
+ stock_pb2.StockReply(
+ symbol=self._symbol, price=self._price))
+ self._symbol = None
+ self._price = None
+
+ def consume_and_terminate(self, stock_request):
+ control.control()
+ if active():
+ if self._price is None:
+ stock_reply_callback(
+ stock_pb2.StockReply(
+ symbol=stock_request.symbol,
+ price=_price(stock_request.symbol)))
+ else:
+ candidate_price = _price(stock_request.symbol)
+ if self._price < candidate_price:
+ stock_reply_callback(
+ stock_pb2.StockReply(
+ symbol=stock_request.symbol,
+ price=candidate_price))
+ else:
+ stock_reply_callback(
+ stock_pb2.StockReply(
+ symbol=self._symbol, price=self._price))
+
+ self._symbol = None
+ self._price = None
+
+ return StockRequestConsumer()
class GetLastTradePrice(_service.UnaryUnaryTestMethodImplementation):
- """GetLastTradePrice for use in tests."""
+ """GetLastTradePrice for use in tests."""
- def group(self):
- return _STOCK_GROUP_NAME
+ def group(self):
+ return _STOCK_GROUP_NAME
- def name(self):
- return 'GetLastTradePrice'
+ def name(self):
+ return 'GetLastTradePrice'
- def cardinality(self):
- return cardinality.Cardinality.UNARY_UNARY
+ def cardinality(self):
+ return cardinality.Cardinality.UNARY_UNARY
- def request_class(self):
- return stock_pb2.StockRequest
+ def request_class(self):
+ return stock_pb2.StockRequest
- def response_class(self):
- return stock_pb2.StockReply
+ def response_class(self):
+ return stock_pb2.StockReply
- def serialize_request(self, request):
- return request.SerializeToString()
+ def serialize_request(self, request):
+ return request.SerializeToString()
- def deserialize_request(self, serialized_request):
- return stock_pb2.StockRequest.FromString(serialized_request)
+ def deserialize_request(self, serialized_request):
+ return stock_pb2.StockRequest.FromString(serialized_request)
- def serialize_response(self, response):
- return response.SerializeToString()
+ def serialize_response(self, response):
+ return response.SerializeToString()
- def deserialize_response(self, serialized_response):
- return stock_pb2.StockReply.FromString(serialized_response)
+ def deserialize_response(self, serialized_response):
+ return stock_pb2.StockReply.FromString(serialized_response)
- def service(self, request, response_callback, context, control):
- _get_last_trade_price(
- request, response_callback, control, context.is_active)
+ def service(self, request, response_callback, context, control):
+ _get_last_trade_price(request, response_callback, control,
+ context.is_active)
class GetLastTradePriceMessages(_service.UnaryUnaryTestMessages):
- def __init__(self):
- self._index = 0
+ def __init__(self):
+ self._index = 0
- def request(self):
- symbol = _SYMBOL_FORMAT % self._index
- self._index += 1
- return stock_pb2.StockRequest(symbol=symbol)
+ def request(self):
+ symbol = _SYMBOL_FORMAT % self._index
+ self._index += 1
+ return stock_pb2.StockRequest(symbol=symbol)
- def verify(self, request, response, test_case):
- test_case.assertEqual(request.symbol, response.symbol)
- test_case.assertEqual(_price(request.symbol), response.price)
+ def verify(self, request, response, test_case):
+ test_case.assertEqual(request.symbol, response.symbol)
+ test_case.assertEqual(_price(request.symbol), response.price)
class GetLastTradePriceMultiple(_service.StreamStreamTestMethodImplementation):
- """GetLastTradePriceMultiple for use in tests."""
+ """GetLastTradePriceMultiple for use in tests."""
- def group(self):
- return _STOCK_GROUP_NAME
+ def group(self):
+ return _STOCK_GROUP_NAME
- def name(self):
- return 'GetLastTradePriceMultiple'
+ def name(self):
+ return 'GetLastTradePriceMultiple'
- def cardinality(self):
- return cardinality.Cardinality.STREAM_STREAM
+ def cardinality(self):
+ return cardinality.Cardinality.STREAM_STREAM
- def request_class(self):
- return stock_pb2.StockRequest
+ def request_class(self):
+ return stock_pb2.StockRequest
- def response_class(self):
- return stock_pb2.StockReply
+ def response_class(self):
+ return stock_pb2.StockReply
- def serialize_request(self, request):
- return request.SerializeToString()
+ def serialize_request(self, request):
+ return request.SerializeToString()
- def deserialize_request(self, serialized_request):
- return stock_pb2.StockRequest.FromString(serialized_request)
+ def deserialize_request(self, serialized_request):
+ return stock_pb2.StockRequest.FromString(serialized_request)
- def serialize_response(self, response):
- return response.SerializeToString()
+ def serialize_response(self, response):
+ return response.SerializeToString()
- def deserialize_response(self, serialized_response):
- return stock_pb2.StockReply.FromString(serialized_response)
+ def deserialize_response(self, serialized_response):
+ return stock_pb2.StockReply.FromString(serialized_response)
- def service(self, response_consumer, context, control):
- return _get_last_trade_price_multiple(
- response_consumer, control, context.is_active)
+ def service(self, response_consumer, context, control):
+ return _get_last_trade_price_multiple(response_consumer, control,
+ context.is_active)
class GetLastTradePriceMultipleMessages(_service.StreamStreamTestMessages):
- """Pairs of message streams for use with GetLastTradePriceMultiple."""
+ """Pairs of message streams for use with GetLastTradePriceMultiple."""
- def __init__(self):
- self._index = 0
+ def __init__(self):
+ self._index = 0
- def requests(self):
- base_index = self._index
- self._index += 1
- return [
- stock_pb2.StockRequest(symbol=_SYMBOL_FORMAT % (base_index + index))
- for index in range(test_constants.STREAM_LENGTH)]
+ def requests(self):
+ base_index = self._index
+ self._index += 1
+ return [
+ stock_pb2.StockRequest(symbol=_SYMBOL_FORMAT % (base_index + index))
+ for index in range(test_constants.STREAM_LENGTH)
+ ]
- def verify(self, requests, responses, test_case):
- test_case.assertEqual(len(requests), len(responses))
- for stock_request, stock_reply in zip(requests, responses):
- test_case.assertEqual(stock_request.symbol, stock_reply.symbol)
- test_case.assertEqual(_price(stock_request.symbol), stock_reply.price)
+ def verify(self, requests, responses, test_case):
+ test_case.assertEqual(len(requests), len(responses))
+ for stock_request, stock_reply in zip(requests, responses):
+ test_case.assertEqual(stock_request.symbol, stock_reply.symbol)
+ test_case.assertEqual(
+ _price(stock_request.symbol), stock_reply.price)
class WatchFutureTrades(_service.UnaryStreamTestMethodImplementation):
- """WatchFutureTrades for use in tests."""
+ """WatchFutureTrades for use in tests."""
- def group(self):
- return _STOCK_GROUP_NAME
+ def group(self):
+ return _STOCK_GROUP_NAME
- def name(self):
- return 'WatchFutureTrades'
+ def name(self):
+ return 'WatchFutureTrades'
- def cardinality(self):
- return cardinality.Cardinality.UNARY_STREAM
+ def cardinality(self):
+ return cardinality.Cardinality.UNARY_STREAM
- def request_class(self):
- return stock_pb2.StockRequest
+ def request_class(self):
+ return stock_pb2.StockRequest
- def response_class(self):
- return stock_pb2.StockReply
+ def response_class(self):
+ return stock_pb2.StockReply
- def serialize_request(self, request):
- return request.SerializeToString()
+ def serialize_request(self, request):
+ return request.SerializeToString()
- def deserialize_request(self, serialized_request):
- return stock_pb2.StockRequest.FromString(serialized_request)
+ def deserialize_request(self, serialized_request):
+ return stock_pb2.StockRequest.FromString(serialized_request)
- def serialize_response(self, response):
- return response.SerializeToString()
+ def serialize_response(self, response):
+ return response.SerializeToString()
- def deserialize_response(self, serialized_response):
- return stock_pb2.StockReply.FromString(serialized_response)
+ def deserialize_response(self, serialized_response):
+ return stock_pb2.StockReply.FromString(serialized_response)
- def service(self, request, response_consumer, context, control):
- _watch_future_trades(request, response_consumer, control, context.is_active)
+ def service(self, request, response_consumer, context, control):
+ _watch_future_trades(request, response_consumer, control,
+ context.is_active)
class WatchFutureTradesMessages(_service.UnaryStreamTestMessages):
- """Pairs of a single request message and a sequence of response messages."""
+ """Pairs of a single request message and a sequence of response messages."""
- def __init__(self):
- self._index = 0
+ def __init__(self):
+ self._index = 0
- def request(self):
- symbol = _SYMBOL_FORMAT % self._index
- self._index += 1
- return stock_pb2.StockRequest(
- symbol=symbol, num_trades_to_watch=test_constants.STREAM_LENGTH)
+ def request(self):
+ symbol = _SYMBOL_FORMAT % self._index
+ self._index += 1
+ return stock_pb2.StockRequest(
+ symbol=symbol, num_trades_to_watch=test_constants.STREAM_LENGTH)
- def verify(self, request, responses, test_case):
- test_case.assertEqual(test_constants.STREAM_LENGTH, len(responses))
- base_price = _price(request.symbol)
- for index, response in enumerate(responses):
- test_case.assertEqual(base_price + index, response.price)
+ def verify(self, request, responses, test_case):
+ test_case.assertEqual(test_constants.STREAM_LENGTH, len(responses))
+ base_price = _price(request.symbol)
+ for index, response in enumerate(responses):
+ test_case.assertEqual(base_price + index, response.price)
class GetHighestTradePrice(_service.StreamUnaryTestMethodImplementation):
- """GetHighestTradePrice for use in tests."""
+ """GetHighestTradePrice for use in tests."""
- def group(self):
- return _STOCK_GROUP_NAME
+ def group(self):
+ return _STOCK_GROUP_NAME
- def name(self):
- return 'GetHighestTradePrice'
+ def name(self):
+ return 'GetHighestTradePrice'
- def cardinality(self):
- return cardinality.Cardinality.STREAM_UNARY
+ def cardinality(self):
+ return cardinality.Cardinality.STREAM_UNARY
- def request_class(self):
- return stock_pb2.StockRequest
+ def request_class(self):
+ return stock_pb2.StockRequest
- def response_class(self):
- return stock_pb2.StockReply
+ def response_class(self):
+ return stock_pb2.StockReply
- def serialize_request(self, request):
- return request.SerializeToString()
+ def serialize_request(self, request):
+ return request.SerializeToString()
- def deserialize_request(self, serialized_request):
- return stock_pb2.StockRequest.FromString(serialized_request)
+ def deserialize_request(self, serialized_request):
+ return stock_pb2.StockRequest.FromString(serialized_request)
- def serialize_response(self, response):
- return response.SerializeToString()
+ def serialize_response(self, response):
+ return response.SerializeToString()
- def deserialize_response(self, serialized_response):
- return stock_pb2.StockReply.FromString(serialized_response)
+ def deserialize_response(self, serialized_response):
+ return stock_pb2.StockReply.FromString(serialized_response)
- def service(self, response_callback, context, control):
- return _get_highest_trade_price(
- response_callback, control, context.is_active)
+ def service(self, response_callback, context, control):
+ return _get_highest_trade_price(response_callback, control,
+ context.is_active)
class GetHighestTradePriceMessages(_service.StreamUnaryTestMessages):
- def requests(self):
- return [
- stock_pb2.StockRequest(symbol=_SYMBOL_FORMAT % index)
- for index in range(test_constants.STREAM_LENGTH)]
-
- def verify(self, requests, response, test_case):
- price = None
- symbol = None
- for stock_request in requests:
- current_symbol = stock_request.symbol
- current_price = _price(current_symbol)
- if price is None or price < current_price:
- price = current_price
- symbol = current_symbol
- test_case.assertEqual(price, response.price)
- test_case.assertEqual(symbol, response.symbol)
+ def requests(self):
+ return [
+ stock_pb2.StockRequest(symbol=_SYMBOL_FORMAT % index)
+ for index in range(test_constants.STREAM_LENGTH)
+ ]
+
+ def verify(self, requests, response, test_case):
+ price = None
+ symbol = None
+ for stock_request in requests:
+ current_symbol = stock_request.symbol
+ current_price = _price(current_symbol)
+ if price is None or price < current_price:
+ price = current_price
+ symbol = current_symbol
+ test_case.assertEqual(price, response.price)
+ test_case.assertEqual(symbol, response.symbol)
class StockTestService(_service.TestService):
- """A corpus of test data with one method of each RPC cardinality."""
-
- def unary_unary_scenarios(self):
- return {
- (_STOCK_GROUP_NAME, 'GetLastTradePrice'): (
- GetLastTradePrice(), [GetLastTradePriceMessages()]),
- }
-
- def unary_stream_scenarios(self):
- return {
- (_STOCK_GROUP_NAME, 'WatchFutureTrades'): (
- WatchFutureTrades(), [WatchFutureTradesMessages()]),
- }
-
- def stream_unary_scenarios(self):
- return {
- (_STOCK_GROUP_NAME, 'GetHighestTradePrice'): (
- GetHighestTradePrice(), [GetHighestTradePriceMessages()])
- }
-
- def stream_stream_scenarios(self):
- return {
- (_STOCK_GROUP_NAME, 'GetLastTradePriceMultiple'): (
- GetLastTradePriceMultiple(), [GetLastTradePriceMultipleMessages()]),
- }
+ """A corpus of test data with one method of each RPC cardinality."""
+
+ def unary_unary_scenarios(self):
+ return {
+ (_STOCK_GROUP_NAME, 'GetLastTradePrice'):
+ (GetLastTradePrice(), [GetLastTradePriceMessages()]),
+ }
+
+ def unary_stream_scenarios(self):
+ return {
+ (_STOCK_GROUP_NAME, 'WatchFutureTrades'):
+ (WatchFutureTrades(), [WatchFutureTradesMessages()]),
+ }
+
+ def stream_unary_scenarios(self):
+ return {
+ (_STOCK_GROUP_NAME, 'GetHighestTradePrice'):
+ (GetHighestTradePrice(), [GetHighestTradePriceMessages()])
+ }
+
+ def stream_stream_scenarios(self):
+ return {
+ (_STOCK_GROUP_NAME, 'GetLastTradePriceMultiple'):
+ (GetLastTradePriceMultiple(),
+ [GetLastTradePriceMultipleMessages()]),
+ }
STOCK_TEST_SERVICE = StockTestService()
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py
index 71de9d835e..d84e1fc136 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Tools for creating tests of implementations of the Face layer."""
# unittest is referenced from specification in this module.
@@ -40,12 +39,11 @@ from tests.unit.framework.interfaces.face import test_interfaces # pylint: disa
_TEST_CASE_SUPERCLASSES = (
_blocking_invocation_inline_service.TestCase,
- _future_invocation_asynchronous_event_service.TestCase,
-)
+ _future_invocation_asynchronous_event_service.TestCase,)
def test_cases(implementation):
- """Creates unittest.TestCase classes for a given Face layer implementation.
+ """Creates unittest.TestCase classes for a given Face layer implementation.
Args:
implementation: A test_interfaces.Implementation specifying creation and
@@ -55,13 +53,14 @@ def test_cases(implementation):
A sequence of subclasses of unittest.TestCase defining tests of the
specified Face layer implementation.
"""
- test_case_classes = []
- for invoker_constructor in _invocation.invoker_constructors():
- for super_class in _TEST_CASE_SUPERCLASSES:
- test_case_classes.append(
- type(invoker_constructor.name() + super_class.NAME, (super_class,),
- {'implementation': implementation,
- 'invoker_constructor': invoker_constructor,
- '__module__': implementation.__module__,
- }))
- return test_case_classes
+ test_case_classes = []
+ for invoker_constructor in _invocation.invoker_constructors():
+ for super_class in _TEST_CASE_SUPERCLASSES:
+ test_case_classes.append(
+ type(invoker_constructor.name() + super_class.NAME, (
+ super_class,), {
+ 'implementation': implementation,
+ 'invoker_constructor': invoker_constructor,
+ '__module__': implementation.__module__,
+ }))
+ return test_case_classes
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_interfaces.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_interfaces.py
index 40f38e68ba..a789d435b4 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_interfaces.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_interfaces.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Interfaces used in tests of implementations of the Face layer."""
import abc
@@ -38,103 +37,102 @@ from grpc.framework.interfaces.face import face # pylint: disable=unused-import
class Method(six.with_metaclass(abc.ABCMeta)):
- """Specifies a method to be used in tests."""
+ """Specifies a method to be used in tests."""
- @abc.abstractmethod
- def group(self):
- """Identify the group of the method.
+ @abc.abstractmethod
+ def group(self):
+ """Identify the group of the method.
Returns:
The group of the method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def name(self):
- """Identify the name of the method.
+ @abc.abstractmethod
+ def name(self):
+ """Identify the name of the method.
Returns:
The name of the method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def cardinality(self):
- """Identify the cardinality of the method.
+ @abc.abstractmethod
+ def cardinality(self):
+ """Identify the cardinality of the method.
Returns:
A cardinality.Cardinality value describing the streaming semantics of the
method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def request_class(self):
- """Identify the class used for the method's request objects.
+ @abc.abstractmethod
+ def request_class(self):
+ """Identify the class used for the method's request objects.
Returns:
The class object of the class to which the method's request objects
belong.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def response_class(self):
- """Identify the class used for the method's response objects.
+ @abc.abstractmethod
+ def response_class(self):
+ """Identify the class used for the method's response objects.
Returns:
The class object of the class to which the method's response objects
belong.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def serialize_request(self, request):
- """Serialize the given request object.
+ @abc.abstractmethod
+ def serialize_request(self, request):
+ """Serialize the given request object.
Args:
request: A request object appropriate for this method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def deserialize_request(self, serialized_request):
- """Synthesize a request object from a given bytestring.
+ @abc.abstractmethod
+ def deserialize_request(self, serialized_request):
+ """Synthesize a request object from a given bytestring.
Args:
serialized_request: A bytestring deserializable into a request object
appropriate for this method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def serialize_response(self, response):
- """Serialize the given response object.
+ @abc.abstractmethod
+ def serialize_response(self, response):
+ """Serialize the given response object.
Args:
response: A response object appropriate for this method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def deserialize_response(self, serialized_response):
- """Synthesize a response object from a given bytestring.
+ @abc.abstractmethod
+ def deserialize_response(self, serialized_response):
+ """Synthesize a response object from a given bytestring.
Args:
serialized_response: A bytestring deserializable into a response object
appropriate for this method.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
class Implementation(six.with_metaclass(abc.ABCMeta)):
- """Specifies an implementation of the Face layer."""
+ """Specifies an implementation of the Face layer."""
- @abc.abstractmethod
- def instantiate(
- self, methods, method_implementations,
- multi_method_implementation):
- """Instantiates the Face layer implementation to be used in a test.
+ @abc.abstractmethod
+ def instantiate(self, methods, method_implementations,
+ multi_method_implementation):
+ """Instantiates the Face layer implementation to be used in a test.
Args:
methods: A sequence of Method objects describing the methods available to
@@ -151,69 +149,69 @@ class Implementation(six.with_metaclass(abc.ABCMeta)):
passed to destantiate at the conclusion of the test. The returned stubs
must be backed by the provided implementations.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def destantiate(self, memo):
- """Destroys the Face layer implementation under test.
+ @abc.abstractmethod
+ def destantiate(self, memo):
+ """Destroys the Face layer implementation under test.
Args:
memo: The object from the third position of the return value of a call to
instantiate.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def invocation_metadata(self):
- """Provides the metadata to be used when invoking a test RPC.
+ @abc.abstractmethod
+ def invocation_metadata(self):
+ """Provides the metadata to be used when invoking a test RPC.
Returns:
An object to use as the supplied-at-invocation-time metadata in a test
RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def initial_metadata(self):
- """Provides the metadata for use as a test RPC's first servicer metadata.
+ @abc.abstractmethod
+ def initial_metadata(self):
+ """Provides the metadata for use as a test RPC's first servicer metadata.
Returns:
An object to use as the from-the-servicer-before-responses metadata in a
test RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def terminal_metadata(self):
- """Provides the metadata for use as a test RPC's second servicer metadata.
+ @abc.abstractmethod
+ def terminal_metadata(self):
+ """Provides the metadata for use as a test RPC's second servicer metadata.
Returns:
An object to use as the from-the-servicer-after-all-responses metadata in
a test RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def code(self):
- """Provides the value for use as a test RPC's code.
+ @abc.abstractmethod
+ def code(self):
+ """Provides the value for use as a test RPC's code.
Returns:
An object to use as the from-the-servicer code in a test RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def details(self):
- """Provides the value for use as a test RPC's details.
+ @abc.abstractmethod
+ def details(self):
+ """Provides the value for use as a test RPC's details.
Returns:
An object to use as the from-the-servicer details in a test RPC.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
- @abc.abstractmethod
- def metadata_transmitted(self, original_metadata, transmitted_metadata):
- """Identifies whether or not metadata was properly transmitted.
+ @abc.abstractmethod
+ def metadata_transmitted(self, original_metadata, transmitted_metadata):
+ """Identifies whether or not metadata was properly transmitted.
Args:
original_metadata: A metadata value passed to the Face interface
@@ -226,4 +224,4 @@ class Implementation(six.with_metaclass(abc.ABCMeta)):
Whether or not the metadata was properly transmitted by the Face interface
implementation under test.
"""
- raise NotImplementedError()
+ raise NotImplementedError()
diff --git a/src/python/grpcio_tests/tests/unit/resources.py b/src/python/grpcio_tests/tests/unit/resources.py
index 023cdb155f..55a2fff979 100644
--- a/src/python/grpcio_tests/tests/unit/resources.py
+++ b/src/python/grpcio_tests/tests/unit/resources.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Constants and functions for data used in interoperability testing."""
import os
@@ -39,14 +38,14 @@ _CERTIFICATE_CHAIN_RESOURCE_PATH = 'credentials/server1.pem'
def test_root_certificates():
- return pkg_resources.resource_string(
- __name__, _ROOT_CERTIFICATES_RESOURCE_PATH)
+ return pkg_resources.resource_string(__name__,
+ _ROOT_CERTIFICATES_RESOURCE_PATH)
def private_key():
- return pkg_resources.resource_string(__name__, _PRIVATE_KEY_RESOURCE_PATH)
+ return pkg_resources.resource_string(__name__, _PRIVATE_KEY_RESOURCE_PATH)
def certificate_chain():
- return pkg_resources.resource_string(
- __name__, _CERTIFICATE_CHAIN_RESOURCE_PATH)
+ return pkg_resources.resource_string(__name__,
+ _CERTIFICATE_CHAIN_RESOURCE_PATH)
diff --git a/src/python/grpcio_tests/tests/unit/test_common.py b/src/python/grpcio_tests/tests/unit/test_common.py
index cd71bd80d7..00fbe0567a 100644
--- a/src/python/grpcio_tests/tests/unit/test_common.py
+++ b/src/python/grpcio_tests/tests/unit/test_common.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""Common code used throughout tests of gRPC."""
import collections
@@ -34,14 +33,23 @@ import collections
import grpc
import six
-INVOCATION_INITIAL_METADATA = (('0', 'abc'), ('1', 'def'), ('2', 'ghi'),)
-SERVICE_INITIAL_METADATA = (('3', 'jkl'), ('4', 'mno'), ('5', 'pqr'),)
-SERVICE_TERMINAL_METADATA = (('6', 'stu'), ('7', 'vwx'), ('8', 'yza'),)
+INVOCATION_INITIAL_METADATA = (
+ ('0', 'abc'),
+ ('1', 'def'),
+ ('2', 'ghi'),)
+SERVICE_INITIAL_METADATA = (
+ ('3', 'jkl'),
+ ('4', 'mno'),
+ ('5', 'pqr'),)
+SERVICE_TERMINAL_METADATA = (
+ ('6', 'stu'),
+ ('7', 'vwx'),
+ ('8', 'yza'),)
DETAILS = 'test details'
def metadata_transmitted(original_metadata, transmitted_metadata):
- """Judges whether or not metadata was acceptably transmitted.
+ """Judges whether or not metadata was acceptably transmitted.
gRPC is allowed to insert key-value pairs into the metadata values given by
applications and to reorder key-value pairs with different keys but it is not
@@ -59,31 +67,30 @@ def metadata_transmitted(original_metadata, transmitted_metadata):
A boolean indicating whether transmitted_metadata accurately reflects
original_metadata after having been transmitted via gRPC.
"""
- original = collections.defaultdict(list)
- for key, value in original_metadata:
- original[key].append(value)
- transmitted = collections.defaultdict(list)
- for key, value in transmitted_metadata:
- transmitted[key].append(value)
+ original = collections.defaultdict(list)
+ for key, value in original_metadata:
+ original[key].append(value)
+ transmitted = collections.defaultdict(list)
+ for key, value in transmitted_metadata:
+ transmitted[key].append(value)
- for key, values in six.iteritems(original):
- transmitted_values = transmitted[key]
- transmitted_iterator = iter(transmitted_values)
- try:
- for value in values:
- while True:
- transmitted_value = next(transmitted_iterator)
- if value == transmitted_value:
- break
- except StopIteration:
- return False
- else:
- return True
+ for key, values in six.iteritems(original):
+ transmitted_values = transmitted[key]
+ transmitted_iterator = iter(transmitted_values)
+ try:
+ for value in values:
+ while True:
+ transmitted_value = next(transmitted_iterator)
+ if value == transmitted_value:
+ break
+ except StopIteration:
+ return False
+ else:
+ return True
-def test_secure_channel(
- target, channel_credentials, server_host_override):
- """Creates an insecure Channel to a remote host.
+def test_secure_channel(target, channel_credentials, server_host_override):
+ """Creates an insecure Channel to a remote host.
Args:
host: The name of the remote host to which to connect.
@@ -96,7 +103,7 @@ def test_secure_channel(
An implementations.Channel to the remote host through which RPCs may be
conducted.
"""
- channel = grpc.secure_channel(
- target, channel_credentials,
- (('grpc.ssl_target_name_override', server_host_override,),))
- return channel
+ channel = grpc.secure_channel(target, channel_credentials, ((
+ 'grpc.ssl_target_name_override',
+ server_host_override,),))
+ return channel
diff --git a/src/ruby/ext/grpc/rb_byte_buffer.c b/src/ruby/ext/grpc/rb_byte_buffer.c
index 47fd6d9120..65fa2f2cf6 100644
--- a/src/ruby/ext/grpc/rb_byte_buffer.c
+++ b/src/ruby/ext/grpc/rb_byte_buffer.c
@@ -68,3 +68,10 @@ VALUE grpc_rb_byte_buffer_to_s(grpc_byte_buffer *buffer) {
grpc_byte_buffer_reader_destroy(&reader);
return rb_string;
}
+
+VALUE grpc_rb_slice_to_ruby_string(grpc_slice slice) {
+ if (GRPC_SLICE_START_PTR(slice) == NULL) {
+ rb_raise(rb_eRuntimeError, "attempt to convert uninitialized grpc_slice to ruby string");
+ }
+ return rb_str_new((char*)GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice));
+}
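The helper added above converts a grpc_slice into a Ruby string by reading the slice's start pointer and length. As a point of reference only, here is a minimal standalone sketch of the slice round-trip it relies on, using just the public grpc_slice API that appears elsewhere in this diff; header paths and the literal string are illustrative.

#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <stdio.h>

int main(void) {
  grpc_slice s = grpc_slice_from_copied_string("status: ok");
  /* A grpc_slice is a refcounted (pointer, length) pair; it is not
   * guaranteed to be NUL-terminated, so the length always travels with it. */
  printf("%.*s\n", (int)GRPC_SLICE_LENGTH(s),
         (const char *)GRPC_SLICE_START_PTR(s));
  char *c = grpc_slice_to_c_string(s); /* heap-allocated, NUL-terminated copy */
  printf("%s\n", c);
  gpr_free(c);
  grpc_slice_unref(s);
  return 0;
}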
diff --git a/src/ruby/ext/grpc/rb_byte_buffer.h b/src/ruby/ext/grpc/rb_byte_buffer.h
index c7ddd76489..fac68fe6a0 100644
--- a/src/ruby/ext/grpc/rb_byte_buffer.h
+++ b/src/ruby/ext/grpc/rb_byte_buffer.h
@@ -44,4 +44,7 @@ grpc_byte_buffer *grpc_rb_s_to_byte_buffer(char *string, size_t length);
/* Converts a grpc_byte_buffer to a ruby string */
VALUE grpc_rb_byte_buffer_to_s(grpc_byte_buffer *buffer);
+/* Converts a grpc_slice to a ruby string */
+VALUE grpc_rb_slice_to_ruby_string(grpc_slice slice);
+
#endif /* GRPC_RB_BYTE_BUFFER_H_ */
diff --git a/src/ruby/ext/grpc/rb_call.c b/src/ruby/ext/grpc/rb_call.c
index 67a42af619..0179bd9e9b 100644
--- a/src/ruby/ext/grpc/rb_call.c
+++ b/src/ruby/ext/grpc/rb_call.c
@@ -121,8 +121,8 @@ static size_t md_ary_datasize(const void *p) {
size_t i, datasize = sizeof(grpc_metadata_array);
for (i = 0; i < ary->count; ++i) {
const grpc_metadata *const md = &ary->metadata[i];
- datasize += strlen(md->key);
- datasize += md->value_length;
+ datasize += GRPC_SLICE_LENGTH(md->key);
+ datasize += GRPC_SLICE_LENGTH(md->value);
}
datasize += ary->capacity * sizeof(grpc_metadata);
return datasize;
@@ -386,23 +386,23 @@ static int grpc_rb_md_ary_fill_hash_cb(VALUE key, VALUE val, VALUE md_ary_obj) {
grpc_metadata_array *md_ary = NULL;
long array_length;
long i;
- char *key_str;
- size_t key_len;
- char *value_str;
- size_t value_len;
+ grpc_slice key_slice;
+ grpc_slice value_slice;
+ char* tmp_str;
if (TYPE(key) == T_SYMBOL) {
- key_str = (char *)rb_id2name(SYM2ID(key));
- key_len = strlen(key_str);
- } else { /* StringValueCStr does all other type exclusions for us */
- key_str = StringValueCStr(key);
- key_len = RSTRING_LEN(key);
+ key_slice = grpc_slice_from_static_string(rb_id2name(SYM2ID(key)));
+ } else if (TYPE(key) == T_STRING) {
+ key_slice = grpc_slice_from_copied_buffer(RSTRING_PTR(key), RSTRING_LEN(key));
+ } else {
+ rb_raise(rb_eTypeError, "grpc_rb_md_ary_fill_hash_cb: bad type for key parameter");
}
- if (!grpc_header_key_is_legal(key_str, key_len)) {
+ if (!grpc_header_key_is_legal(key_slice)) {
+ tmp_str = grpc_slice_to_c_string(key_slice);
rb_raise(rb_eArgError,
"'%s' is an invalid header key, must match [a-z0-9-_.]+",
- key_str);
+ tmp_str);
return ST_STOP;
}
@@ -414,33 +414,31 @@ static int grpc_rb_md_ary_fill_hash_cb(VALUE key, VALUE val, VALUE md_ary_obj) {
array_length = RARRAY_LEN(val);
/* If the value is an array, add capacity for each value in the array */
for (i = 0; i < array_length; i++) {
- value_str = RSTRING_PTR(rb_ary_entry(val, i));
- value_len = RSTRING_LEN(rb_ary_entry(val, i));
- if (!grpc_is_binary_header(key_str, key_len) &&
- !grpc_header_nonbin_value_is_legal(value_str, value_len)) {
+ value_slice = grpc_slice_from_copied_buffer(RSTRING_PTR(rb_ary_entry(val, i)), RSTRING_LEN(rb_ary_entry(val, i)));
+ if (!grpc_is_binary_header(key_slice) &&
+ !grpc_header_nonbin_value_is_legal(value_slice)) {
// The value has invalid characters
+ tmp_str = grpc_slice_to_c_string(value_slice);
rb_raise(rb_eArgError,
- "Header value '%s' has invalid characters", value_str);
+ "Header value '%s' has invalid characters", tmp_str);
return ST_STOP;
}
- md_ary->metadata[md_ary->count].key = key_str;
- md_ary->metadata[md_ary->count].value = value_str;
- md_ary->metadata[md_ary->count].value_length = value_len;
+ md_ary->metadata[md_ary->count].key = key_slice;
+ md_ary->metadata[md_ary->count].value = value_slice;
md_ary->count += 1;
}
} else if (TYPE(val) == T_STRING) {
- value_str = RSTRING_PTR(val);
- value_len = RSTRING_LEN(val);
- if (!grpc_is_binary_header(key_str, key_len) &&
- !grpc_header_nonbin_value_is_legal(value_str, value_len)) {
+ value_slice = grpc_slice_from_copied_buffer(RSTRING_PTR(val), RSTRING_LEN(val));
+ if (!grpc_is_binary_header(key_slice) &&
+ !grpc_header_nonbin_value_is_legal(value_slice)) {
// The value has invalid characters
+ tmp_str = grpc_slice_to_c_string(value_slice);
rb_raise(rb_eArgError,
- "Header value '%s' has invalid characters", value_str);
+ "Header value '%s' has invalid characters", tmp_str);
return ST_STOP;
}
- md_ary->metadata[md_ary->count].key = key_str;
- md_ary->metadata[md_ary->count].value = value_str;
- md_ary->metadata[md_ary->count].value_length = value_len;
+ md_ary->metadata[md_ary->count].key = key_slice;
+ md_ary->metadata[md_ary->count].value = value_slice;
md_ary->count += 1;
} else {
rb_raise(rb_eArgError,
@@ -506,22 +504,19 @@ VALUE grpc_rb_md_ary_to_h(grpc_metadata_array *md_ary) {
size_t i;
for (i = 0; i < md_ary->count; i++) {
- key = rb_str_new2(md_ary->metadata[i].key);
+ key = grpc_rb_slice_to_ruby_string(md_ary->metadata[i].key);
value = rb_hash_aref(result, key);
if (value == Qnil) {
- value = rb_str_new(md_ary->metadata[i].value,
- md_ary->metadata[i].value_length);
+ value = grpc_rb_slice_to_ruby_string(md_ary->metadata[i].value);
rb_hash_aset(result, key, value);
} else if (TYPE(value) == T_ARRAY) {
/* Add the string to the returned array */
- rb_ary_push(value, rb_str_new(md_ary->metadata[i].value,
- md_ary->metadata[i].value_length));
+ rb_ary_push(value, grpc_rb_slice_to_ruby_string(md_ary->metadata[i].value));
} else {
/* Add the current value with this key and the new one to an array */
new_ary = rb_ary_new();
rb_ary_push(new_ary, value);
- rb_ary_push(new_ary, rb_str_new(md_ary->metadata[i].value,
- md_ary->metadata[i].value_length));
+ rb_ary_push(new_ary, grpc_rb_slice_to_ruby_string(md_ary->metadata[i].value));
rb_hash_aset(result, key, new_ary);
}
}
@@ -563,6 +558,7 @@ static int grpc_rb_call_check_op_keys_hash_cb(VALUE key, VALUE val,
*/
static void grpc_rb_op_update_status_from_server(grpc_op *op,
grpc_metadata_array *md_ary,
+ grpc_slice *send_status_details,
VALUE status) {
VALUE code = rb_struct_aref(status, sym_code);
VALUE details = rb_struct_aref(status, sym_details);
@@ -579,8 +575,11 @@ static void grpc_rb_op_update_status_from_server(grpc_op *op,
rb_obj_classname(code));
return;
}
+
+ *send_status_details = grpc_slice_from_copied_buffer(RSTRING_PTR(details), RSTRING_LEN(details));
+
op->data.send_status_from_server.status = NUM2INT(code);
- op->data.send_status_from_server.status_details = StringValueCStr(details);
+ op->data.send_status_from_server.status_details = send_status_details;
grpc_rb_md_ary_convert(metadata_hash, md_ary);
op->data.send_status_from_server.trailing_metadata_count = md_ary->count;
op->data.send_status_from_server.trailing_metadata = md_ary->metadata;
@@ -603,9 +602,9 @@ typedef struct run_batch_stack {
grpc_metadata_array recv_trailing_metadata;
int recv_cancelled;
grpc_status_code recv_status;
- char *recv_status_details;
- size_t recv_status_details_capacity;
+ grpc_slice recv_status_details;
unsigned write_flag;
+ grpc_slice send_status_details;
} run_batch_stack;
/* grpc_run_batch_stack_init ensures the run_batch_stack is properly
@@ -631,8 +630,12 @@ static void grpc_run_batch_stack_cleanup(run_batch_stack *st) {
grpc_metadata_array_destroy(&st->recv_metadata);
grpc_metadata_array_destroy(&st->recv_trailing_metadata);
- if (st->recv_status_details != NULL) {
- gpr_free(st->recv_status_details);
+ if (GRPC_SLICE_START_PTR(st->send_status_details) != NULL) {
+ grpc_slice_unref(st->send_status_details);
+ }
+
+ if (GRPC_SLICE_START_PTR(st->recv_status_details) != NULL) {
+ grpc_slice_unref(st->recv_status_details);
}
if (st->recv_message != NULL) {
@@ -683,7 +686,7 @@ static void grpc_run_batch_stack_fill_ops(run_batch_stack *st, VALUE ops_hash) {
/* N.B. later there is no need to explicitly delete the metadata keys
* and values, they are references to data in ruby objects. */
grpc_rb_op_update_status_from_server(
- &st->ops[st->op_num], &st->send_trailing_metadata, this_value);
+ &st->ops[st->op_num], &st->send_trailing_metadata, &st->send_status_details, this_value);
break;
case GRPC_OP_RECV_INITIAL_METADATA:
st->ops[st->op_num].data.recv_initial_metadata = &st->recv_metadata;
@@ -698,8 +701,6 @@ static void grpc_run_batch_stack_fill_ops(run_batch_stack *st, VALUE ops_hash) {
&st->recv_status;
st->ops[st->op_num].data.recv_status_on_client.status_details =
&st->recv_status_details;
- st->ops[st->op_num].data.recv_status_on_client.status_details_capacity =
- &st->recv_status_details_capacity;
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
st->ops[st->op_num].data.recv_close_on_server.cancelled =
@@ -747,9 +748,9 @@ static VALUE grpc_run_batch_stack_build_result(run_batch_stack *st) {
rb_struct_aset(
result, sym_status,
rb_struct_new(grpc_rb_sStatus, UINT2NUM(st->recv_status),
- (st->recv_status_details == NULL
+ (GRPC_SLICE_START_PTR(st->recv_status_details) == NULL
? Qnil
- : rb_str_new2(st->recv_status_details)),
+ : grpc_rb_slice_to_ruby_string(st->recv_status_details)),
grpc_rb_md_ary_to_h(&st->recv_trailing_metadata),
NULL));
break;
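The rb_call.c changes above switch metadata keys and values from (char *, length) pairs to grpc_slice, and the header-validation helpers now take slices directly, matching the new signatures declared later in this diff. A hedged sketch of that pattern, outside the Ruby wrapper, is below; "add_trailer" is a made-up helper name and error handling is trimmed.

#include <grpc/grpc.h>
#include <grpc/slice.h>

/* Illustrative only: populate one grpc_metadata entry with slice key/value. */
static int add_trailer(grpc_metadata *md, const char *key, const char *value) {
  grpc_slice k = grpc_slice_from_copied_string(key);
  grpc_slice v = grpc_slice_from_copied_string(value);
  /* Validity checks now take slices instead of (char *, length) pairs. */
  if (!grpc_header_key_is_legal(k) ||
      (!grpc_is_binary_header(k) && !grpc_header_nonbin_value_is_legal(v))) {
    grpc_slice_unref(k);
    grpc_slice_unref(v);
    return 0;
  }
  md->key = k;   /* was: const char *key */
  md->value = v; /* was: const char *value + size_t value_length */
  return 1;
}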
diff --git a/src/ruby/ext/grpc/rb_channel.c b/src/ruby/ext/grpc/rb_channel.c
index 3b2b88eb77..84e43d3f7b 100644
--- a/src/ruby/ext/grpc/rb_channel.c
+++ b/src/ruby/ext/grpc/rb_channel.c
@@ -35,6 +35,7 @@
#include "rb_grpc_imports.generated.h"
#include "rb_channel.h"
+#include "rb_byte_buffer.h"
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
@@ -252,10 +253,14 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent,
grpc_channel *ch = NULL;
grpc_completion_queue *cq = NULL;
int flags = GRPC_PROPAGATE_DEFAULTS;
- char *method_chars = StringValueCStr(method);
- char *host_chars = NULL;
+ grpc_slice method_slice;
+ grpc_slice host_slice;
+ grpc_slice *host_slice_ptr = NULL;
+ char* tmp_str = NULL;
+
if (host != Qnil) {
- host_chars = StringValueCStr(host);
+ host_slice = grpc_slice_from_copied_buffer(RSTRING_PTR(host), RSTRING_LEN(host));
+ host_slice_ptr = &host_slice;
}
if (mask != Qnil) {
flags = NUM2UINT(mask);
@@ -272,15 +277,25 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent,
return Qnil;
}
- call = grpc_channel_create_call(ch, parent_call, flags, cq, method_chars,
- host_chars, grpc_rb_time_timeval(
+ method_slice = grpc_slice_from_copied_buffer(RSTRING_PTR(method), RSTRING_LEN(method));
+
+ call = grpc_channel_create_call(ch, parent_call, flags, cq, method_slice,
+ host_slice_ptr, grpc_rb_time_timeval(
deadline,
/* absolute time */ 0), NULL);
+
if (call == NULL) {
+ tmp_str = grpc_slice_to_c_string(method_slice);
rb_raise(rb_eRuntimeError, "cannot create call with method %s",
- method_chars);
+ tmp_str);
return Qnil;
}
+
+ grpc_slice_unref(method_slice);
+ if (host_slice_ptr != NULL) {
+ grpc_slice_unref(host_slice);
+ }
+
res = grpc_rb_wrap_call(call, cq);
/* Make this channel an instance attribute of the call so that it is not GCed
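The rb_channel.c hunk above adapts call creation to the new grpc_channel_create_call signature (method as a grpc_slice by value, host as an optional grpc_slice pointer), which is also updated in rb_grpc_imports.generated.h below. A minimal sketch of a caller, assuming an existing channel and completion queue; "start_call" and the method string are illustrative, not part of the wrapper.

#include <grpc/grpc.h>
#include <grpc/slice.h>
#include <grpc/support/time.h>

static grpc_call *start_call(grpc_channel *channel, grpc_completion_queue *cq) {
  grpc_slice method =
      grpc_slice_from_static_string("/stock.Stock/GetLastTradePrice");
  grpc_slice host = grpc_slice_from_static_string("localhost");
  /* method is passed by value as a slice; host goes by pointer and may be
   * NULL to fall back to the channel's default authority. Static-string
   * slices need no unref; copied slices (as in the wrapper above) do. */
  return grpc_channel_create_call(channel, NULL /* parent */,
                                  GRPC_PROPAGATE_DEFAULTS, cq, method, &host,
                                  gpr_inf_future(GPR_CLOCK_REALTIME),
                                  NULL /* reserved */);
}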
diff --git a/src/ruby/ext/grpc/rb_compression_options.c b/src/ruby/ext/grpc/rb_compression_options.c
index 6200dbafeb..6b2467ee46 100644
--- a/src/ruby/ext/grpc/rb_compression_options.c
+++ b/src/ruby/ext/grpc/rb_compression_options.c
@@ -34,6 +34,7 @@
#include <ruby/ruby.h>
#include "rb_compression_options.h"
+#include "rb_byte_buffer.h"
#include "rb_grpc_imports.generated.h"
#include <grpc/compression.h>
@@ -168,9 +169,9 @@ void grpc_rb_compression_options_set_default_level(
* Raises an error if the name of the algorithm passed in is invalid. */
void grpc_rb_compression_options_algorithm_name_to_value_internal(
grpc_compression_algorithm *algorithm_value, VALUE algorithm_name) {
- char *name_str = NULL;
- long name_len = 0;
+ grpc_slice name_slice;
VALUE algorithm_name_as_string = Qnil;
+ char *tmp_str = NULL;
Check_Type(algorithm_name, T_SYMBOL);
@@ -178,16 +179,18 @@ void grpc_rb_compression_options_algorithm_name_to_value_internal(
* correct C string out of it. */
algorithm_name_as_string = rb_funcall(algorithm_name, rb_intern("to_s"), 0);
- name_str = RSTRING_PTR(algorithm_name_as_string);
- name_len = RSTRING_LEN(algorithm_name_as_string);
+ name_slice = grpc_slice_from_copied_buffer(RSTRING_PTR(algorithm_name_as_string), RSTRING_LEN(algorithm_name_as_string));
/* Raise an error if the name isn't recognized as a compression algorithm by
* the algorithm parse function
* in GRPC core. */
- if (!grpc_compression_algorithm_parse(name_str, name_len, algorithm_value)) {
+ if(!grpc_compression_algorithm_parse(name_slice, algorithm_value)) {
+ tmp_str = grpc_slice_to_c_string(name_slice);
rb_raise(rb_eNameError, "Invalid compression algorithm name: %s",
- StringValueCStr(algorithm_name_as_string));
+ tmp_str);
}
+
+ grpc_slice_unref(name_slice);
}
/* Indicates whether a given algorithm is enabled on this instance, given the
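grpc_compression_algorithm_parse likewise now takes a grpc_slice rather than a (name, length) pair, per the typedef change further down in this diff. A small sketch of the new call shape, under the assumption that the recognized names ("identity", "gzip", "deflate") are unchanged; "parse_or_none" is a made-up name.

#include <grpc/compression.h>
#include <grpc/slice.h>

static grpc_compression_algorithm parse_or_none(const char *name) {
  grpc_slice s = grpc_slice_from_copied_string(name);
  grpc_compression_algorithm algorithm = GRPC_COMPRESS_NONE;
  if (!grpc_compression_algorithm_parse(s, &algorithm)) {
    algorithm = GRPC_COMPRESS_NONE; /* name not recognized by core */
  }
  grpc_slice_unref(s);
  return algorithm;
}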
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index 6c36df9113..230682e72d 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -179,17 +179,30 @@ grpc_slice_new_type grpc_slice_new_import;
grpc_slice_new_with_user_data_type grpc_slice_new_with_user_data_import;
grpc_slice_new_with_len_type grpc_slice_new_with_len_import;
grpc_slice_malloc_type grpc_slice_malloc_import;
+grpc_slice_intern_type grpc_slice_intern_import;
grpc_slice_from_copied_string_type grpc_slice_from_copied_string_import;
grpc_slice_from_copied_buffer_type grpc_slice_from_copied_buffer_import;
grpc_slice_from_static_string_type grpc_slice_from_static_string_import;
+grpc_slice_from_static_buffer_type grpc_slice_from_static_buffer_import;
grpc_slice_sub_type grpc_slice_sub_import;
grpc_slice_sub_no_ref_type grpc_slice_sub_no_ref_import;
grpc_slice_split_tail_type grpc_slice_split_tail_import;
grpc_slice_split_head_type grpc_slice_split_head_import;
-gpr_empty_slice_type gpr_empty_slice_import;
+grpc_empty_slice_type grpc_empty_slice_import;
+grpc_slice_default_hash_impl_type grpc_slice_default_hash_impl_import;
+grpc_slice_default_eq_impl_type grpc_slice_default_eq_impl_import;
+grpc_slice_eq_type grpc_slice_eq_import;
grpc_slice_cmp_type grpc_slice_cmp_import;
grpc_slice_str_cmp_type grpc_slice_str_cmp_import;
+grpc_slice_buf_cmp_type grpc_slice_buf_cmp_import;
+grpc_slice_buf_start_eq_type grpc_slice_buf_start_eq_import;
+grpc_slice_rchr_type grpc_slice_rchr_import;
+grpc_slice_chr_type grpc_slice_chr_import;
+grpc_slice_slice_type grpc_slice_slice_import;
+grpc_slice_hash_type grpc_slice_hash_import;
grpc_slice_is_equivalent_type grpc_slice_is_equivalent_import;
+grpc_slice_dup_type grpc_slice_dup_import;
+grpc_slice_to_c_string_type grpc_slice_to_c_string_import;
grpc_slice_buffer_init_type grpc_slice_buffer_init_import;
grpc_slice_buffer_destroy_type grpc_slice_buffer_destroy_import;
grpc_slice_buffer_add_type grpc_slice_buffer_add_import;
@@ -455,17 +468,30 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_slice_new_with_user_data_import = (grpc_slice_new_with_user_data_type) GetProcAddress(library, "grpc_slice_new_with_user_data");
grpc_slice_new_with_len_import = (grpc_slice_new_with_len_type) GetProcAddress(library, "grpc_slice_new_with_len");
grpc_slice_malloc_import = (grpc_slice_malloc_type) GetProcAddress(library, "grpc_slice_malloc");
+ grpc_slice_intern_import = (grpc_slice_intern_type) GetProcAddress(library, "grpc_slice_intern");
grpc_slice_from_copied_string_import = (grpc_slice_from_copied_string_type) GetProcAddress(library, "grpc_slice_from_copied_string");
grpc_slice_from_copied_buffer_import = (grpc_slice_from_copied_buffer_type) GetProcAddress(library, "grpc_slice_from_copied_buffer");
grpc_slice_from_static_string_import = (grpc_slice_from_static_string_type) GetProcAddress(library, "grpc_slice_from_static_string");
+ grpc_slice_from_static_buffer_import = (grpc_slice_from_static_buffer_type) GetProcAddress(library, "grpc_slice_from_static_buffer");
grpc_slice_sub_import = (grpc_slice_sub_type) GetProcAddress(library, "grpc_slice_sub");
grpc_slice_sub_no_ref_import = (grpc_slice_sub_no_ref_type) GetProcAddress(library, "grpc_slice_sub_no_ref");
grpc_slice_split_tail_import = (grpc_slice_split_tail_type) GetProcAddress(library, "grpc_slice_split_tail");
grpc_slice_split_head_import = (grpc_slice_split_head_type) GetProcAddress(library, "grpc_slice_split_head");
- gpr_empty_slice_import = (gpr_empty_slice_type) GetProcAddress(library, "gpr_empty_slice");
+ grpc_empty_slice_import = (grpc_empty_slice_type) GetProcAddress(library, "grpc_empty_slice");
+ grpc_slice_default_hash_impl_import = (grpc_slice_default_hash_impl_type) GetProcAddress(library, "grpc_slice_default_hash_impl");
+ grpc_slice_default_eq_impl_import = (grpc_slice_default_eq_impl_type) GetProcAddress(library, "grpc_slice_default_eq_impl");
+ grpc_slice_eq_import = (grpc_slice_eq_type) GetProcAddress(library, "grpc_slice_eq");
grpc_slice_cmp_import = (grpc_slice_cmp_type) GetProcAddress(library, "grpc_slice_cmp");
grpc_slice_str_cmp_import = (grpc_slice_str_cmp_type) GetProcAddress(library, "grpc_slice_str_cmp");
+ grpc_slice_buf_cmp_import = (grpc_slice_buf_cmp_type) GetProcAddress(library, "grpc_slice_buf_cmp");
+ grpc_slice_buf_start_eq_import = (grpc_slice_buf_start_eq_type) GetProcAddress(library, "grpc_slice_buf_start_eq");
+ grpc_slice_rchr_import = (grpc_slice_rchr_type) GetProcAddress(library, "grpc_slice_rchr");
+ grpc_slice_chr_import = (grpc_slice_chr_type) GetProcAddress(library, "grpc_slice_chr");
+ grpc_slice_slice_import = (grpc_slice_slice_type) GetProcAddress(library, "grpc_slice_slice");
+ grpc_slice_hash_import = (grpc_slice_hash_type) GetProcAddress(library, "grpc_slice_hash");
grpc_slice_is_equivalent_import = (grpc_slice_is_equivalent_type) GetProcAddress(library, "grpc_slice_is_equivalent");
+ grpc_slice_dup_import = (grpc_slice_dup_type) GetProcAddress(library, "grpc_slice_dup");
+ grpc_slice_to_c_string_import = (grpc_slice_to_c_string_type) GetProcAddress(library, "grpc_slice_to_c_string");
grpc_slice_buffer_init_import = (grpc_slice_buffer_init_type) GetProcAddress(library, "grpc_slice_buffer_init");
grpc_slice_buffer_destroy_import = (grpc_slice_buffer_destroy_type) GetProcAddress(library, "grpc_slice_buffer_destroy");
grpc_slice_buffer_add_import = (grpc_slice_buffer_add_type) GetProcAddress(library, "grpc_slice_buffer_add");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index 5745686adf..4c4f655b86 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -176,7 +176,7 @@ extern census_resource_id_type census_resource_id_import;
typedef void(*census_record_values_type)(census_context *context, census_value *values, size_t nvalues);
extern census_record_values_type census_record_values_import;
#define census_record_values census_record_values_import
-typedef int(*grpc_compression_algorithm_parse_type)(const char *name, size_t name_length, grpc_compression_algorithm *algorithm);
+typedef int(*grpc_compression_algorithm_parse_type)(grpc_slice value, grpc_compression_algorithm *algorithm);
extern grpc_compression_algorithm_parse_type grpc_compression_algorithm_parse_import;
#define grpc_compression_algorithm_parse grpc_compression_algorithm_parse_import
typedef int(*grpc_compression_algorithm_name_type)(grpc_compression_algorithm algorithm, char **name);
@@ -254,7 +254,7 @@ extern grpc_channel_check_connectivity_state_type grpc_channel_check_connectivit
typedef void(*grpc_channel_watch_connectivity_state_type)(grpc_channel *channel, grpc_connectivity_state last_observed_state, gpr_timespec deadline, grpc_completion_queue *cq, void *tag);
extern grpc_channel_watch_connectivity_state_type grpc_channel_watch_connectivity_state_import;
#define grpc_channel_watch_connectivity_state grpc_channel_watch_connectivity_state_import
-typedef grpc_call *(*grpc_channel_create_call_type)(grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *completion_queue, const char *method, const char *host, gpr_timespec deadline, void *reserved);
+typedef grpc_call *(*grpc_channel_create_call_type)(grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *completion_queue, grpc_slice method, const grpc_slice *host, gpr_timespec deadline, void *reserved);
extern grpc_channel_create_call_type grpc_channel_create_call_import;
#define grpc_channel_create_call grpc_channel_create_call_import
typedef void(*grpc_channel_ping_type)(grpc_channel *channel, grpc_completion_queue *cq, void *tag, void *reserved);
@@ -338,13 +338,13 @@ extern grpc_server_destroy_type grpc_server_destroy_import;
typedef int(*grpc_tracer_set_enabled_type)(const char *name, int enabled);
extern grpc_tracer_set_enabled_type grpc_tracer_set_enabled_import;
#define grpc_tracer_set_enabled grpc_tracer_set_enabled_import
-typedef int(*grpc_header_key_is_legal_type)(const char *key, size_t length);
+typedef int(*grpc_header_key_is_legal_type)(grpc_slice slice);
extern grpc_header_key_is_legal_type grpc_header_key_is_legal_import;
#define grpc_header_key_is_legal grpc_header_key_is_legal_import
-typedef int(*grpc_header_nonbin_value_is_legal_type)(const char *value, size_t length);
+typedef int(*grpc_header_nonbin_value_is_legal_type)(grpc_slice slice);
extern grpc_header_nonbin_value_is_legal_type grpc_header_nonbin_value_is_legal_import;
#define grpc_header_nonbin_value_is_legal grpc_header_nonbin_value_is_legal_import
-typedef int(*grpc_is_binary_header_type)(const char *key, size_t length);
+typedef int(*grpc_is_binary_header_type)(grpc_slice slice);
extern grpc_is_binary_header_type grpc_is_binary_header_import;
#define grpc_is_binary_header grpc_is_binary_header_import
typedef const char *(*grpc_call_error_to_string_type)(grpc_call_error error);
@@ -488,6 +488,9 @@ extern grpc_slice_new_with_len_type grpc_slice_new_with_len_import;
typedef grpc_slice(*grpc_slice_malloc_type)(size_t length);
extern grpc_slice_malloc_type grpc_slice_malloc_import;
#define grpc_slice_malloc grpc_slice_malloc_import
+typedef grpc_slice(*grpc_slice_intern_type)(grpc_slice slice);
+extern grpc_slice_intern_type grpc_slice_intern_import;
+#define grpc_slice_intern grpc_slice_intern_import
typedef grpc_slice(*grpc_slice_from_copied_string_type)(const char *source);
extern grpc_slice_from_copied_string_type grpc_slice_from_copied_string_import;
#define grpc_slice_from_copied_string grpc_slice_from_copied_string_import
@@ -497,6 +500,9 @@ extern grpc_slice_from_copied_buffer_type grpc_slice_from_copied_buffer_import;
typedef grpc_slice(*grpc_slice_from_static_string_type)(const char *source);
extern grpc_slice_from_static_string_type grpc_slice_from_static_string_import;
#define grpc_slice_from_static_string grpc_slice_from_static_string_import
+typedef grpc_slice(*grpc_slice_from_static_buffer_type)(const void *source, size_t len);
+extern grpc_slice_from_static_buffer_type grpc_slice_from_static_buffer_import;
+#define grpc_slice_from_static_buffer grpc_slice_from_static_buffer_import
typedef grpc_slice(*grpc_slice_sub_type)(grpc_slice s, size_t begin, size_t end);
extern grpc_slice_sub_type grpc_slice_sub_import;
#define grpc_slice_sub grpc_slice_sub_import
@@ -509,18 +515,51 @@ extern grpc_slice_split_tail_type grpc_slice_split_tail_import;
typedef grpc_slice(*grpc_slice_split_head_type)(grpc_slice *s, size_t split);
extern grpc_slice_split_head_type grpc_slice_split_head_import;
#define grpc_slice_split_head grpc_slice_split_head_import
-typedef grpc_slice(*gpr_empty_slice_type)(void);
-extern gpr_empty_slice_type gpr_empty_slice_import;
-#define gpr_empty_slice gpr_empty_slice_import
+typedef grpc_slice(*grpc_empty_slice_type)(void);
+extern grpc_empty_slice_type grpc_empty_slice_import;
+#define grpc_empty_slice grpc_empty_slice_import
+typedef uint32_t(*grpc_slice_default_hash_impl_type)(grpc_slice s);
+extern grpc_slice_default_hash_impl_type grpc_slice_default_hash_impl_import;
+#define grpc_slice_default_hash_impl grpc_slice_default_hash_impl_import
+typedef int(*grpc_slice_default_eq_impl_type)(grpc_slice a, grpc_slice b);
+extern grpc_slice_default_eq_impl_type grpc_slice_default_eq_impl_import;
+#define grpc_slice_default_eq_impl grpc_slice_default_eq_impl_import
+typedef int(*grpc_slice_eq_type)(grpc_slice a, grpc_slice b);
+extern grpc_slice_eq_type grpc_slice_eq_import;
+#define grpc_slice_eq grpc_slice_eq_import
typedef int(*grpc_slice_cmp_type)(grpc_slice a, grpc_slice b);
extern grpc_slice_cmp_type grpc_slice_cmp_import;
#define grpc_slice_cmp grpc_slice_cmp_import
typedef int(*grpc_slice_str_cmp_type)(grpc_slice a, const char *b);
extern grpc_slice_str_cmp_type grpc_slice_str_cmp_import;
#define grpc_slice_str_cmp grpc_slice_str_cmp_import
+typedef int(*grpc_slice_buf_cmp_type)(grpc_slice a, const void *b, size_t blen);
+extern grpc_slice_buf_cmp_type grpc_slice_buf_cmp_import;
+#define grpc_slice_buf_cmp grpc_slice_buf_cmp_import
+typedef int(*grpc_slice_buf_start_eq_type)(grpc_slice a, const void *b, size_t blen);
+extern grpc_slice_buf_start_eq_type grpc_slice_buf_start_eq_import;
+#define grpc_slice_buf_start_eq grpc_slice_buf_start_eq_import
+typedef int(*grpc_slice_rchr_type)(grpc_slice s, char c);
+extern grpc_slice_rchr_type grpc_slice_rchr_import;
+#define grpc_slice_rchr grpc_slice_rchr_import
+typedef int(*grpc_slice_chr_type)(grpc_slice s, char c);
+extern grpc_slice_chr_type grpc_slice_chr_import;
+#define grpc_slice_chr grpc_slice_chr_import
+typedef int(*grpc_slice_slice_type)(grpc_slice haystack, grpc_slice needle);
+extern grpc_slice_slice_type grpc_slice_slice_import;
+#define grpc_slice_slice grpc_slice_slice_import
+typedef uint32_t(*grpc_slice_hash_type)(grpc_slice s);
+extern grpc_slice_hash_type grpc_slice_hash_import;
+#define grpc_slice_hash grpc_slice_hash_import
typedef int(*grpc_slice_is_equivalent_type)(grpc_slice a, grpc_slice b);
extern grpc_slice_is_equivalent_type grpc_slice_is_equivalent_import;
#define grpc_slice_is_equivalent grpc_slice_is_equivalent_import
+typedef grpc_slice(*grpc_slice_dup_type)(grpc_slice a);
+extern grpc_slice_dup_type grpc_slice_dup_import;
+#define grpc_slice_dup grpc_slice_dup_import
+typedef char *(*grpc_slice_to_c_string_type)(grpc_slice s);
+extern grpc_slice_to_c_string_type grpc_slice_to_c_string_import;
+#define grpc_slice_to_c_string grpc_slice_to_c_string_import
typedef void(*grpc_slice_buffer_init_type)(grpc_slice_buffer *sb);
extern grpc_slice_buffer_init_type grpc_slice_buffer_init_import;
#define grpc_slice_buffer_init grpc_slice_buffer_init_import
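The rb_grpc_imports.generated.* changes above and below follow one mechanical pattern for Windows dynamic loading: a function-pointer typedef, an *_import variable filled in with GetProcAddress, and a macro that redirects call sites to that variable. A hedged sketch of the pattern with a placeholder symbol ("some_grpc_fn" is not a real export):

#include <windows.h>

typedef int (*some_grpc_fn_type)(int value);
static some_grpc_fn_type some_grpc_fn_import;
#define some_grpc_fn some_grpc_fn_import

/* Resolve the export by name; after this, callers use some_grpc_fn(...) and
 * the macro routes the call through the loaded function pointer. */
static void load_imports(HMODULE library) {
  some_grpc_fn_import =
      (some_grpc_fn_type)GetProcAddress(library, "some_grpc_fn");
}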
diff --git a/src/ruby/ext/grpc/rb_server.c b/src/ruby/ext/grpc/rb_server.c
index c7b112c94b..7b2f5774aa 100644
--- a/src/ruby/ext/grpc/rb_server.c
+++ b/src/ruby/ext/grpc/rb_server.c
@@ -44,6 +44,7 @@
#include "rb_channel_args.h"
#include "rb_completion_queue.h"
#include "rb_server_credentials.h"
+#include "rb_byte_buffer.h"
#include "rb_grpc.h"
/* grpc_rb_cServer is the ruby class that proxies grpc_server. */
@@ -166,8 +167,6 @@ static void grpc_request_call_stack_init(request_call_stack* st) {
MEMZERO(st, request_call_stack, 1);
grpc_metadata_array_init(&st->md_ary);
grpc_call_details_init(&st->details);
- st->details.method = NULL;
- st->details.host = NULL;
}
/* grpc_request_call_stack_cleanup ensures the request_call_stack is properly
@@ -191,6 +190,7 @@ static VALUE grpc_rb_server_request_call(VALUE self) {
void *tag = (void*)&st;
grpc_completion_queue *call_queue = grpc_completion_queue_create(NULL);
gpr_timespec deadline;
+
TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
if (s->wrapped == NULL) {
rb_raise(rb_eRuntimeError, "destroyed!");
@@ -218,11 +218,13 @@ static VALUE grpc_rb_server_request_call(VALUE self) {
return Qnil;
}
+
+
/* build the NewServerRpc struct result */
deadline = gpr_convert_clock_type(st.details.deadline, GPR_CLOCK_REALTIME);
result = rb_struct_new(
- grpc_rb_sNewServerRpc, rb_str_new2(st.details.method),
- rb_str_new2(st.details.host),
+ grpc_rb_sNewServerRpc, grpc_rb_slice_to_ruby_string(st.details.method),
+ grpc_rb_slice_to_ruby_string(st.details.host),
rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec),
INT2NUM(deadline.tv_nsec / 1000)),
grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call, call_queue),