-rw-r--r--  .clang-format  54
-rw-r--r--  .gitignore  5
-rw-r--r--  INSTALL  149
-rw-r--r--  Makefile  307
-rw-r--r--  build.json  51
-rw-r--r--  examples/tips/client.cc  60
-rw-r--r--  examples/tips/client.h (renamed from src/node/port_picker.js)  36
-rw-r--r--  examples/tips/client_main.cc  73
-rw-r--r--  examples/tips/client_test.cc  106
-rw-r--r--  examples/tips/empty.proto  13
-rw-r--r--  examples/tips/label.proto  48
-rw-r--r--  examples/tips/pubsub.proto  702
-rw-r--r--  include/grpc++/server_credentials.h  8
-rw-r--r--  include/grpc/grpc.h  3
-rw-r--r--  include/grpc/grpc_security.h  56
-rw-r--r--  src/core/iomgr/pollset_kick_posix.c  30
-rw-r--r--  src/core/iomgr/socket_utils_common_posix.c  4
-rw-r--r--  src/core/iomgr/socket_utils_posix.c  18
-rw-r--r--  src/core/iomgr/tcp_server_posix.c  2
-rw-r--r--  src/core/security/credentials.c  113
-rw-r--r--  src/core/security/credentials.h  13
-rw-r--r--  src/core/security/security_context.c  19
-rw-r--r--  src/core/security/security_context.h  2
-rw-r--r--  src/core/security/server_secure_chttp2.c  16
-rw-r--r--  src/core/support/time_posix.c  2
-rw-r--r--  src/cpp/client/credentials.cc  22
-rw-r--r--  src/cpp/server/server_credentials.cc  23
-rw-r--r--  src/node/binding.gyp  3
-rw-r--r--  src/node/client.js  53
-rw-r--r--  src/node/common.js  5
-rw-r--r--  src/node/credentials.cc  24
-rw-r--r--  src/node/examples/math_server.js  8
-rw-r--r--  src/node/interop/empty.proto  19
-rw-r--r--  src/node/interop/interop_client.js  274
-rw-r--r--  src/node/interop/interop_server.js  203
-rw-r--r--  src/node/interop/messages.proto  94
-rw-r--r--  src/node/interop/test.proto  42
-rw-r--r--  src/node/main.js  12
-rw-r--r--  src/node/package.json  6
-rw-r--r--  src/node/server.cc  4
-rw-r--r--  src/node/server.js  69
-rw-r--r--  src/node/server_credentials.cc  18
-rw-r--r--  src/node/surface_client.js  12
-rw-r--r--  src/node/surface_server.js  13
-rw-r--r--  src/node/test/client_server_test.js  159
-rw-r--r--  src/node/test/end_to_end_test.js  239
-rw-r--r--  src/node/test/interop_sanity_test.js  71
-rw-r--r--  src/node/test/math_client_test.js  18
-rw-r--r--  src/node/test/server_test.js  87
-rw-r--r--  src/node/test/surface_test.js  6
-rw-r--r--  src/php/ext/grpc/credentials.c  15
-rw-r--r--  src/php/ext/grpc/server_credentials.c  14
-rw-r--r--  src/python/__init__.py  0
-rw-r--r--  src/python/_framework/__init__.py  0
-rw-r--r--  src/python/_framework/foundation/__init__.py  0
-rw-r--r--  src/python/_framework/foundation/_logging_pool_test.py (renamed from src/ruby/lib/grpc/beefcake.rb)  65
-rw-r--r--  src/python/_framework/foundation/logging_pool.py  83
-rwxr-xr-x  src/ruby/README.md  83
-rwxr-xr-x  src/ruby/bin/interop/README.md  11
-rwxr-xr-x  src/ruby/bin/interop/interop_client.rb  16
-rwxr-xr-x  src/ruby/bin/interop/interop_server.rb  6
-rw-r--r--  src/ruby/ext/grpc/extconf.rb  8
-rw-r--r--  src/ruby/ext/grpc/rb_completion_queue.c  2
-rw-r--r--  src/ruby/ext/grpc/rb_credentials.c  19
-rw-r--r--  src/ruby/ext/grpc/rb_server_credentials.c  14
-rwxr-xr-x  src/ruby/grpc.gemspec  8
-rw-r--r--  src/ruby/lib/grpc/generic/bidi_call.rb  2
-rw-r--r--  src/ruby/lib/grpc/generic/rpc_server.rb  6
-rw-r--r--  src/ruby/lib/grpc/logconfig.rb  2
-rw-r--r--  src/ruby/spec/client_server_spec.rb  2
-rwxr-xr-x  src/ruby/spec/testdata/README  3
-rw-r--r--  templates/Makefile.template  30
-rw-r--r--  test/core/end2end/data/prod_roots_certs.c  5
-rw-r--r--  test/core/end2end/data/server1_cert.c  5
-rw-r--r--  test/core/end2end/data/server1_key.c  5
-rw-r--r--  test/core/end2end/data/ssl_test_data.h  12
-rw-r--r--  test/core/end2end/data/test_root_cert.c  5
-rw-r--r--  test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c  11
-rw-r--r--  test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c  10
-rw-r--r--  test/core/fling/server.c  7
-rw-r--r--  test/core/iomgr/fd_posix_test.c  25
-rw-r--r--  test/core/iomgr/poll_kick_test.c  30
-rw-r--r--  test/core/iomgr/resolve_address_test.c  5
-rw-r--r--  test/core/iomgr/sockaddr_utils_test.c  6
-rw-r--r--  test/core/iomgr/tcp_client_posix_test.c  4
-rw-r--r--  test/core/security/credentials_test.c  10
-rw-r--r--  test/cpp/client/credentials_test.cc  9
-rw-r--r--  test/cpp/interop/client.cc  4
-rw-r--r--  test/cpp/interop/server.cc  6
-rw-r--r--  test/cpp/util/create_test_channel.cc  7
-rw-r--r--  tools/dockerfile/grpc_base/Dockerfile  25
-rw-r--r--  tools/dockerfile/grpc_cxx/Dockerfile  19
-rw-r--r--  tools/dockerfile/grpc_go/Dockerfile  27
-rw-r--r--  tools/dockerfile/grpc_go/README.md  4
-rw-r--r--  tools/dockerfile/grpc_php/Dockerfile  5
-rw-r--r--  tools/dockerfile/grpc_php_base/Dockerfile  20
-rw-r--r--  tools/dockerfile/grpc_ruby/Dockerfile  9
-rw-r--r--  tools/dockerfile/grpc_ruby_base/Dockerfile  26
-rwxr-xr-x  tools/gce_setup/grpc_docker.sh  81
-rwxr-xr-x  tools/gce_setup/shared_startup_funcs.sh  57
-rwxr-xr-x  tools/run_tests/build_python.sh  10
-rwxr-xr-x  tools/run_tests/jobset.py  4
-rwxr-xr-x  tools/run_tests/run_python.sh  10
-rwxr-xr-x  tools/run_tests/run_tests.py  18
-rw-r--r--  tools/run_tests/tests.json  4
105 files changed, 3366 insertions, 882 deletions
diff --git a/.clang-format b/.clang-format
index 651e1296ba..4b3f13fa55 100644
--- a/.clang-format
+++ b/.clang-format
@@ -1,57 +1,5 @@
---
Language: Cpp
-# BasedOnStyle: Google
-AccessModifierOffset: -1
-ConstructorInitializerIndentWidth: 4
-AlignEscapedNewlinesLeft: true
-AlignTrailingComments: true
-AllowAllParametersOfDeclarationOnNextLine: true
-AllowShortBlocksOnASingleLine: false
-AllowShortIfStatementsOnASingleLine: true
-AllowShortLoopsOnASingleLine: true
-AllowShortFunctionsOnASingleLine: All
-AlwaysBreakTemplateDeclarations: true
-AlwaysBreakBeforeMultilineStrings: true
-BreakBeforeBinaryOperators: false
-BreakBeforeTernaryOperators: true
-BreakConstructorInitializersBeforeComma: false
-BinPackParameters: true
-ColumnLimit: 80
-ConstructorInitializerAllOnOneLineOrOnePerLine: true
-DerivePointerAlignment: true
-ExperimentalAutoDetectBinPacking: false
-IndentCaseLabels: true
-IndentWrappedFunctionNames: false
-IndentFunctionDeclarationAfterType: false
-MaxEmptyLinesToKeep: 1
-KeepEmptyLinesAtTheStartOfBlocks: false
-NamespaceIndentation: None
-ObjCSpaceAfterProperty: false
-ObjCSpaceBeforeProtocolList: false
-PenaltyBreakBeforeFirstCallParameter: 1
-PenaltyBreakComment: 300
-PenaltyBreakString: 1000
-PenaltyBreakFirstLessLess: 120
-PenaltyExcessCharacter: 1000000
-PenaltyReturnTypeOnItsOwnLine: 200
-PointerAlignment: Left
-SpacesBeforeTrailingComments: 2
-Cpp11BracedListStyle: true
-Standard: Auto
-IndentWidth: 2
-TabWidth: 8
-UseTab: Never
-BreakBeforeBraces: Attach
-SpacesInParentheses: false
-SpacesInAngles: false
-SpaceInEmptyParentheses: false
-SpacesInCStyleCastParentheses: false
-SpacesInContainerLiterals: true
-SpaceBeforeAssignmentOperators: true
-ContinuationIndentWidth: 4
-CommentPragmas: '^ IWYU pragma:'
-ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
-SpaceBeforeParens: ControlStatements
-DisableFormat: false
+BasedOnStyle: Google
...
diff --git a/.gitignore b/.gitignore
index 3efc25aafb..002e3e661c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,9 @@ gens
libs
objs
+# Python virtual environment (pre-3.4 only)
+python2.7_virtual_environment
+
# gcov coverage data
coverage
*.gcno
@@ -17,3 +20,5 @@ coverage
# cache for run_tests.py
.run_tests_cache
+# emacs temp files
+*~ \ No newline at end of file
diff --git a/INSTALL b/INSTALL
index a9b0b58aa6..48511aff7d 100644
--- a/INSTALL
+++ b/INSTALL
@@ -1,83 +1,142 @@
-Dependencies
-============
+These instructions only cover building grpc C and C++ libraries under
+typical unix systems. If you need more information, please try grpc's
+wiki pages:
-grpc has few external dependencies. If needed, they are present in the
-third_party directory, if you have cloned the github repository recursively.
-If you didn't clone recursively, you can still get them later by running the
-following command:
+ https://github.com/google/grpc/wiki
-$ git submodule update --init
-Note that the Makefile makes it much easier for you to compile from sources
-if you were to clone recursively our git repository.
+*************************
+* If you are in a hurry *
+*************************
+A typical unix installation won't require any more steps than running:
-grpc core currently depends on zlib and OpenSSL 1.0.2beta3.
+ $ make
+ # make install
-grpc++'s tests depends on protobuf 3.0.0, gtests and gflags.
+You don't need anything other than GNU Make and gcc. Under a Debian or
+Ubuntu system, this should boil down to installing the following packages:
-OpenSSL
--------
+ # apt-get install build-essential python-all-dev python-virtualenv
-Secure HTTP2 requires to have the TLS extension ALPN (see rfc 7301 and
-http://http2.github.io/http2-spec/ section 3.3). Our HTTP2 implementation
-relies on OpenSSL's implementation. OpenSSL 1.0.2beta3 is the first version
-of OpenSSL that has ALPN support, and this explains our dependency on it.
-Note that the Makefile supports compiling only the unsecure elements of grpc,
-and if you do not have OpenSSL and do not want it, you can still proceed
-with installing only the elements you require. However, it is recommended
-to encrypt your network traffic, therefore we urge you to not use the unsecure
-version of grpc if possible.
+******************************
+* More detailed instructions *
+******************************
+Setting up dependencies
+=======================
-Compiling
-=========
+Dependencies to compile the libraries
+-------------------------------------
-If you have all the dependencies in the third_party subfolder, you should
-simply be able to go ahead and run "make" to compile grpc. The other targets
-that you might find interesting are "buildtests" and "test".
+grpc libraries have few external dependencies. If you need to compile and
+install them, they are present in the third_party directory if you have
+cloned the github repository recursively. If you didn't clone recursively,
+you can still get them later by running the following command:
-If you didn't clone from git, and thus are unable to get the required
-dependencies, you can manually download and unpack the necessary packages,
-and let the Makefile build them itself.
+ $ git submodule update --init
-You may also install the dependencies yourself, from the sources, or from
-your distribution's package manager.
+Note that the Makefile makes it much easier for you to compile from sources
+if you cloned our git repository recursively: it will automatically
+compile zlib and OpenSSL, which are core requirements for grpc. This
+creates grpc libraries that have zlib and OpenSSL built into them,
+which significantly increases the libraries' size.
+
+In order to decrease that size, you can manually install zlib and OpenSSL on
+your system, so that the Makefile can use them instead.
+
+Under a Debian or Ubuntu system, one can acquire the development package
+for zlib this way:
-The only development package needed for grpc is zlib.
-The development packages needed for grpc++'s tests are gtests, and gflags.
+ # apt-get install zlib1g-dev
To the best of our knowledge, no distribution has an OpenSSL package that
supports ALPN yet, so you would still have to depend on installing from source
-for that particular dependency.
+for that particular dependency if you want to reduce the libraries' size.
The recommended version of OpenSSL that provides ALPN support is available
at this URL:
https://www.openssl.org/source/openssl-1.0.2-beta3.tar.gz
-If you want to let the Makefile build them automatically for you, please
-extract them in the third_party folder. You will need to rename the extracted
-folder the following way:
- openssl-1.0.2-beta3 --> openssl
+Dependencies to compile and run the tests
+-----------------------------------------
+
+Compiling and running grpc plain-C tests don't require any additional dependencies.
+
+
+Compiling and running grpc C++ tests depend on protobuf 3.0.0, gtest and
+gflags. Although gflags and protobuf are provided in third_party, you will
+need to manually install these dependencies on your system to run these tests.
+
+Under a Debian or Ubuntu system, you can install the gtest and gflags packages
+using apt-get:
+
+ # apt-get install libgflags-dev libgtest-dev
+
+However, protobuf 3.0.0 isn't available as a Debian package yet: you'll need to
+compile and install it from the sources in third_party. Note that if you already
+have the protobuf and protoc packages installed on your system, they will most
+likely interfere, and you'll need to uninstall them first.
+
+Compiling and installing protobuf 3.0.0 itself requires a few more
+dependencies, notably the autoconf suite, curl, and unzip. If you have apt-get, you
+can install these dependencies this way:
+
+ # apt-get install unzip curl autotools-dev
+
+Then, you can build and install protobuf 3.0.0:
+
+ $ cd third_party/protobuf
+ $ ./configure
+ $ make
+ # make install
+ # ldconfig
+
+
+A word on OpenSSL
+-----------------
+
+Secure HTTP2 requires the TLS extension ALPN (see RFC 7301 and
+http://http2.github.io/http2-spec/ section 3.3). Our HTTP2 implementation
+relies on OpenSSL's implementation. OpenSSL 1.0.2beta3 is the first version
+of OpenSSL that has ALPN support, and this explains our dependency on it.
+
+Note that the Makefile supports compiling only the unsecure elements of grpc,
+and if you do not have OpenSSL and do not want it, you can still proceed
+with installing only the elements you require. However, it is recommended
+to encrypt your network traffic, therefore we urge you to not use the unsecure
+version of grpc if possible.
+
+
+Compiling
+=========
+
+If you have all the dependencies mentioned above, you should simply be able
+to go ahead and run "make" to compile grpc's C and C++ libraries:
+
+ $ make
Testing
=======
-At the moment, C++ tests aren't fully available yet. If you want to run tests
-on the C core of grpc, you can do the following:
+To build and run the tests, you can run the command:
+
+ $ make test
+
+If you want to be able to run them in parallel, and get better output, you can
+also use the python tool we have written:
-$ make buildtests_c
-$ make test_c
+ $ ./tools/run_tests/run_tests.py
Installing
==========
-Once everything is compiled, you should be able to install grpc and grpc++
+Once everything is compiled, you should be able to install grpc C and C++
libraries and headers:
-$ sudo make install
+ # make install
diff --git a/Makefile b/Makefile
index 8338f0f049..1d1aff2a6a 100644
--- a/Makefile
+++ b/Makefile
@@ -2,6 +2,14 @@
# This currently builds C and C++ code.
+
+# Basic platform detection
+HOST_SYSTEM = $(shell uname | cut -f 1 -d_)
+ifeq ($(SYSTEM),)
+SYSTEM = $(HOST_SYSTEM)
+endif
+
+
# Configurations
VALID_CONFIG_opt = 1
@@ -115,10 +123,15 @@ LDFLAGS += $(LDFLAGS_$(CONFIG))
CFLAGS += -std=c89 -pedantic
CXXFLAGS += -std=c++11
CPPFLAGS += -g -fPIC -Wall -Werror -Wno-long-long
-LDFLAGS += -g -pthread -fPIC
+LDFLAGS += -g -fPIC
INCLUDES = . include gens
+ifeq ($(SYSTEM),Darwin)
+LIBS = m z
+else
LIBS = rt m z pthread
+LDFLAGS += -pthread
+endif
LIBSXX = protobuf
LIBS_PROTOC = protoc protobuf
@@ -156,11 +169,6 @@ HOST_LDLIBS = $(LDLIBS)
# These are automatically computed variables.
# There shouldn't be any need to change anything from now on.
-HOST_SYSTEM = $(shell uname | cut -f 1 -d_)
-ifeq ($(SYSTEM),)
-SYSTEM = $(HOST_SYSTEM)
-endif
-
ifeq ($(SYSTEM),MINGW32)
SHARED_EXT = dll
endif
@@ -372,6 +380,8 @@ credentials_test: bins/$(CONFIG)/credentials_test
end2end_test: bins/$(CONFIG)/end2end_test
interop_client: bins/$(CONFIG)/interop_client
interop_server: bins/$(CONFIG)/interop_server
+tips_client: bins/$(CONFIG)/tips_client
+tips_client_test: bins/$(CONFIG)/tips_client_test
qps_client: bins/$(CONFIG)/qps_client
qps_server: bins/$(CONFIG)/qps_server
ruby_plugin: bins/$(CONFIG)/ruby_plugin
@@ -524,8 +534,12 @@ libs/$(CONFIG)/zlib/libz.a:
$(Q)cp third_party/zlib/libz.a libs/$(CONFIG)/zlib
libs/$(CONFIG)/openssl/libssl.a:
- $(E) "[MAKE] Building openssl"
+ $(E) "[MAKE] Building openssl for $(SYSTEM)"
+ifeq ($(SYSTEM),Darwin)
+ $(Q)(cd third_party/openssl ; CC="$(CC) -fPIC -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG))" ./Configure darwin64-x86_64-cc $(OPENSSL_CONFIG_$(CONFIG)))
+else
$(Q)(cd third_party/openssl ; CC="$(CC) -fPIC -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG))" ./config $(OPENSSL_CONFIG_$(CONFIG)))
+endif
$(Q)$(MAKE) -C third_party/openssl clean
$(Q)$(MAKE) -C third_party/openssl build_crypto build_ssl
$(Q)mkdir -p libs/$(CONFIG)/openssl
@@ -547,13 +561,13 @@ privatelibs: privatelibs_c privatelibs_cxx
privatelibs_c: libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libend2end_fixture_chttp2_fake_security.a libs/$(CONFIG)/libend2end_fixture_chttp2_fullstack.a libs/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_fullstack.a libs/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.a libs/$(CONFIG)/libend2end_fixture_chttp2_socket_pair.a libs/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time.a libs/$(CONFIG)/libend2end_test_cancel_after_accept.a libs/$(CONFIG)/libend2end_test_cancel_after_accept_and_writes_closed.a libs/$(CONFIG)/libend2end_test_cancel_after_invoke.a libs/$(CONFIG)/libend2end_test_cancel_before_invoke.a libs/$(CONFIG)/libend2end_test_cancel_in_a_vacuum.a libs/$(CONFIG)/libend2end_test_census_simple_request.a libs/$(CONFIG)/libend2end_test_disappearing_server.a libs/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_inflight_calls.a libs/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_tags.a libs/$(CONFIG)/libend2end_test_graceful_server_shutdown.a libs/$(CONFIG)/libend2end_test_invoke_large_request.a libs/$(CONFIG)/libend2end_test_max_concurrent_streams.a libs/$(CONFIG)/libend2end_test_no_op.a libs/$(CONFIG)/libend2end_test_ping_pong_streaming.a libs/$(CONFIG)/libend2end_test_request_response_with_binary_metadata_and_payload.a libs/$(CONFIG)/libend2end_test_request_response_with_metadata_and_payload.a libs/$(CONFIG)/libend2end_test_request_response_with_payload.a libs/$(CONFIG)/libend2end_test_request_response_with_trailing_metadata_and_payload.a libs/$(CONFIG)/libend2end_test_simple_delayed_request.a libs/$(CONFIG)/libend2end_test_simple_request.a libs/$(CONFIG)/libend2end_test_thread_stress.a libs/$(CONFIG)/libend2end_test_writes_done_hangs_with_pending_read.a libs/$(CONFIG)/libend2end_certs.a
-privatelibs_cxx: libs/$(CONFIG)/libgrpc++_test_util.a
+privatelibs_cxx: libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libtips_client_lib.a
buildtests: buildtests_c buildtests_cxx
buildtests_c: privatelibs_c bins/$(CONFIG)/alarm_heap_test bins/$(CONFIG)/alarm_list_test bins/$(CONFIG)/alarm_test bins/$(CONFIG)/alpn_test bins/$(CONFIG)/bin_encoder_test bins/$(CONFIG)/census_hash_table_test bins/$(CONFIG)/census_statistics_multiple_writers_circular_buffer_test bins/$(CONFIG)/census_statistics_multiple_writers_test bins/$(CONFIG)/census_statistics_performance_test bins/$(CONFIG)/census_statistics_quick_test bins/$(CONFIG)/census_statistics_small_log_test bins/$(CONFIG)/census_stub_test bins/$(CONFIG)/census_window_stats_test bins/$(CONFIG)/chttp2_status_conversion_test bins/$(CONFIG)/chttp2_stream_encoder_test bins/$(CONFIG)/chttp2_stream_map_test bins/$(CONFIG)/chttp2_transport_end2end_test bins/$(CONFIG)/dualstack_socket_test bins/$(CONFIG)/echo_client bins/$(CONFIG)/echo_server bins/$(CONFIG)/echo_test bins/$(CONFIG)/fd_posix_test bins/$(CONFIG)/fling_client bins/$(CONFIG)/fling_server bins/$(CONFIG)/fling_stream_test bins/$(CONFIG)/fling_test bins/$(CONFIG)/gpr_cancellable_test bins/$(CONFIG)/gpr_cmdline_test bins/$(CONFIG)/gpr_histogram_test bins/$(CONFIG)/gpr_host_port_test bins/$(CONFIG)/gpr_log_test bins/$(CONFIG)/gpr_slice_buffer_test bins/$(CONFIG)/gpr_slice_test bins/$(CONFIG)/gpr_string_test bins/$(CONFIG)/gpr_sync_test bins/$(CONFIG)/gpr_thd_test bins/$(CONFIG)/gpr_time_test bins/$(CONFIG)/gpr_useful_test bins/$(CONFIG)/grpc_base64_test bins/$(CONFIG)/grpc_byte_buffer_reader_test bins/$(CONFIG)/grpc_channel_stack_test bins/$(CONFIG)/grpc_completion_queue_test bins/$(CONFIG)/grpc_credentials_test bins/$(CONFIG)/grpc_json_token_test bins/$(CONFIG)/grpc_stream_op_test bins/$(CONFIG)/hpack_parser_test bins/$(CONFIG)/hpack_table_test bins/$(CONFIG)/httpcli_format_request_test bins/$(CONFIG)/httpcli_parser_test bins/$(CONFIG)/httpcli_test bins/$(CONFIG)/lame_client_test bins/$(CONFIG)/message_compress_test bins/$(CONFIG)/metadata_buffer_test bins/$(CONFIG)/murmur_hash_test bins/$(CONFIG)/no_server_test bins/$(CONFIG)/poll_kick_test bins/$(CONFIG)/resolve_address_test bins/$(CONFIG)/secure_endpoint_test bins/$(CONFIG)/sockaddr_utils_test bins/$(CONFIG)/tcp_client_posix_test bins/$(CONFIG)/tcp_posix_test bins/$(CONFIG)/tcp_server_posix_test bins/$(CONFIG)/time_averaged_stats_test bins/$(CONFIG)/time_test bins/$(CONFIG)/timeout_encoding_test bins/$(CONFIG)/transport_metadata_test bins/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test bins/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_fake_security_cancel_after_invoke_test bins/$(CONFIG)/chttp2_fake_security_cancel_before_invoke_test bins/$(CONFIG)/chttp2_fake_security_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_fake_security_census_simple_request_test bins/$(CONFIG)/chttp2_fake_security_disappearing_server_test bins/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_fake_security_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_fake_security_invoke_large_request_test bins/$(CONFIG)/chttp2_fake_security_max_concurrent_streams_test bins/$(CONFIG)/chttp2_fake_security_no_op_test bins/$(CONFIG)/chttp2_fake_security_ping_pong_streaming_test bins/$(CONFIG)/chttp2_fake_security_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_fake_security_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_fake_security_request_response_with_payload_test 
bins/$(CONFIG)/chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_fake_security_simple_delayed_request_test bins/$(CONFIG)/chttp2_fake_security_simple_request_test bins/$(CONFIG)/chttp2_fake_security_thread_stress_test bins/$(CONFIG)/chttp2_fake_security_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_fullstack_cancel_after_accept_test bins/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_test bins/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_test bins/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_fullstack_census_simple_request_test bins/$(CONFIG)/chttp2_fullstack_disappearing_server_test bins/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_fullstack_invoke_large_request_test bins/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_test bins/$(CONFIG)/chttp2_fullstack_no_op_test bins/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_test bins/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_fullstack_request_response_with_payload_test bins/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_fullstack_simple_delayed_request_test bins/$(CONFIG)/chttp2_fullstack_simple_request_test bins/$(CONFIG)/chttp2_fullstack_thread_stress_test bins/$(CONFIG)/chttp2_fullstack_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_invoke_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_before_invoke_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_census_simple_request_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_disappearing_server_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_invoke_large_request_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_max_concurrent_streams_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_no_op_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_ping_pong_streaming_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_delayed_request_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_thread_stress_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test 
bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_no_op_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_thread_stress_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_test bins/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_test bins/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_test bins/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_socket_pair_census_simple_request_test bins/$(CONFIG)/chttp2_socket_pair_disappearing_server_test bins/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_socket_pair_invoke_large_request_test bins/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_test bins/$(CONFIG)/chttp2_socket_pair_no_op_test bins/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_test bins/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_test bins/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_test bins/$(CONFIG)/chttp2_socket_pair_simple_request_test bins/$(CONFIG)/chttp2_socket_pair_thread_stress_test bins/$(CONFIG)/chttp2_socket_pair_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test 
bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_thread_stress_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_writes_done_hangs_with_pending_read_test
-buildtests_cxx: privatelibs_cxx bins/$(CONFIG)/channel_arguments_test bins/$(CONFIG)/credentials_test bins/$(CONFIG)/end2end_test bins/$(CONFIG)/interop_client bins/$(CONFIG)/interop_server bins/$(CONFIG)/qps_client bins/$(CONFIG)/qps_server bins/$(CONFIG)/status_test bins/$(CONFIG)/sync_client_async_server_test bins/$(CONFIG)/thread_pool_test
+buildtests_cxx: privatelibs_cxx bins/$(CONFIG)/channel_arguments_test bins/$(CONFIG)/credentials_test bins/$(CONFIG)/end2end_test bins/$(CONFIG)/interop_client bins/$(CONFIG)/interop_server bins/$(CONFIG)/tips_client bins/$(CONFIG)/tips_client_test bins/$(CONFIG)/qps_client bins/$(CONFIG)/qps_server bins/$(CONFIG)/status_test bins/$(CONFIG)/sync_client_async_server_test bins/$(CONFIG)/thread_pool_test
test: test_c test_cxx
@@ -955,6 +969,8 @@ test_cxx: buildtests_cxx
$(Q) ./bins/$(CONFIG)/credentials_test || ( echo test credentials_test failed ; exit 1 )
$(E) "[RUN] Testing end2end_test"
$(Q) ./bins/$(CONFIG)/end2end_test || ( echo test end2end_test failed ; exit 1 )
+ $(E) "[RUN] Testing tips_client_test"
+ $(Q) ./bins/$(CONFIG)/tips_client_test || ( echo test tips_client_test failed ; exit 1 )
$(E) "[RUN] Testing qps_client"
$(Q) ./bins/$(CONFIG)/qps_client || ( echo test qps_client failed ; exit 1 )
$(E) "[RUN] Testing qps_server"
@@ -1008,6 +1024,21 @@ strip-shared_cxx: shared_cxx
$(E) "[STRIP] Stripping libgrpc++.so"
$(Q) $(STRIP) libs/$(CONFIG)/libgrpc++.$(SHARED_EXT)
+gens/examples/tips/empty.pb.cc: examples/tips/empty.proto $(PROTOC_PLUGINS)
+ $(E) "[PROTOC] Generating protobuf CC file from $<"
+ $(Q) mkdir -p `dirname $@`
+ $(Q) $(PROTOC) --cpp_out=gens --grpc_out=gens --plugin=protoc-gen-grpc=bins/$(CONFIG)/cpp_plugin $<
+
+gens/examples/tips/label.pb.cc: examples/tips/label.proto $(PROTOC_PLUGINS)
+ $(E) "[PROTOC] Generating protobuf CC file from $<"
+ $(Q) mkdir -p `dirname $@`
+ $(Q) $(PROTOC) --cpp_out=gens --grpc_out=gens --plugin=protoc-gen-grpc=bins/$(CONFIG)/cpp_plugin $<
+
+gens/examples/tips/pubsub.pb.cc: examples/tips/pubsub.proto $(PROTOC_PLUGINS)
+ $(E) "[PROTOC] Generating protobuf CC file from $<"
+ $(Q) mkdir -p `dirname $@`
+ $(Q) $(PROTOC) --cpp_out=gens --grpc_out=gens --plugin=protoc-gen-grpc=bins/$(CONFIG)/cpp_plugin $<
+
gens/test/cpp/interop/empty.pb.cc: test/cpp/interop/empty.proto $(PROTOC_PLUGINS)
$(E) "[PROTOC] Generating protobuf CC file from $<"
$(Q) mkdir -p `dirname $@`
@@ -1220,7 +1251,11 @@ LIBGPR_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGPR_S
libs/$(CONFIG)/libgpr.a: $(ZLIB_DEP) $(LIBGPR_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libgpr.a
$(Q) $(AR) rcs libs/$(CONFIG)/libgpr.a $(LIBGPR_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libgpr.a
+endif
@@ -1296,7 +1331,11 @@ endif
libs/$(CONFIG)/libgpr_test_util.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBGPR_TEST_UTIL_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libgpr_test_util.a
$(Q) $(AR) rcs libs/$(CONFIG)/libgpr_test_util.a $(LIBGPR_TEST_UTIL_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libgpr_test_util.a
+endif
@@ -1532,6 +1571,7 @@ endif
libs/$(CONFIG)/libgrpc.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBGRPC_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libgrpc.a
$(Q) $(AR) rcs libs/$(CONFIG)/libgrpc.a $(LIBGRPC_OBJS)
$(Q) rm -rf tmp-merge
$(Q) mkdir tmp-merge
@@ -1540,6 +1580,9 @@ libs/$(CONFIG)/libgrpc.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBGRPC_OBJS)
$(Q) rm -f libs/$(CONFIG)/libgrpc.a tmp-merge/__.SYMDEF*
$(Q) ar rcs libs/$(CONFIG)/libgrpc.a tmp-merge/*
$(Q) rm -rf tmp-merge
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libgrpc.a
+endif
@@ -1709,7 +1752,11 @@ endif
libs/$(CONFIG)/libgrpc_test_util.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBGRPC_TEST_UTIL_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libgrpc_test_util.a
$(Q) $(AR) rcs libs/$(CONFIG)/libgrpc_test_util.a $(LIBGRPC_TEST_UTIL_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libgrpc_test_util.a
+endif
@@ -1831,7 +1878,11 @@ LIBGRPC_UNSECURE_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(basename
libs/$(CONFIG)/libgrpc_unsecure.a: $(ZLIB_DEP) $(LIBGRPC_UNSECURE_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libgrpc_unsecure.a
$(Q) $(AR) rcs libs/$(CONFIG)/libgrpc_unsecure.a $(LIBGRPC_UNSECURE_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libgrpc_unsecure.a
+endif
@@ -2025,7 +2076,11 @@ endif
libs/$(CONFIG)/libgrpc++.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBGRPC++_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libgrpc++.a
$(Q) $(AR) rcs libs/$(CONFIG)/libgrpc++.a $(LIBGRPC++_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libgrpc++.a
+endif
@@ -2107,7 +2162,11 @@ endif
libs/$(CONFIG)/libgrpc++_test_util.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBGRPC++_TEST_UTIL_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libgrpc++_test_util.a
$(Q) $(AR) rcs libs/$(CONFIG)/libgrpc++_test_util.a $(LIBGRPC++_TEST_UTIL_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libgrpc++_test_util.a
+endif
@@ -2128,6 +2187,58 @@ objs/$(CONFIG)/test/cpp/end2end/async_test_server.o: gens/test/cpp/util/echo
objs/$(CONFIG)/test/cpp/util/create_test_channel.o: gens/test/cpp/util/echo.pb.cc gens/test/cpp/util/echo_duplicate.pb.cc gens/test/cpp/util/messages.pb.cc
+LIBTIPS_CLIENT_LIB_SRC = \
+ gens/examples/tips/label.pb.cc \
+ gens/examples/tips/empty.pb.cc \
+ gens/examples/tips/pubsub.pb.cc \
+ examples/tips/client.cc \
+
+
+LIBTIPS_CLIENT_LIB_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBTIPS_CLIENT_LIB_SRC))))
+
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure libraries if you don't have OpenSSL with ALPN.
+
+libs/$(CONFIG)/libtips_client_lib.a: openssl_dep_error
+
+
+else
+
+ifneq ($(OPENSSL_DEP),)
+examples/tips/label.proto: $(OPENSSL_DEP)
+examples/tips/empty.proto: $(OPENSSL_DEP)
+examples/tips/pubsub.proto: $(OPENSSL_DEP)
+examples/tips/client.cc: $(OPENSSL_DEP)
+endif
+
+libs/$(CONFIG)/libtips_client_lib.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBTIPS_CLIENT_LIB_OBJS)
+ $(E) "[AR] Creating $@"
+ $(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libtips_client_lib.a
+ $(Q) $(AR) rcs libs/$(CONFIG)/libtips_client_lib.a $(LIBTIPS_CLIENT_LIB_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libtips_client_lib.a
+endif
+
+
+
+
+
+endif
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(LIBTIPS_CLIENT_LIB_OBJS:.o=.dep)
+endif
+endif
+
+
+
+
+objs/$(CONFIG)/examples/tips/client.o: gens/examples/tips/label.pb.cc gens/examples/tips/empty.pb.cc gens/examples/tips/pubsub.pb.cc
+
+
LIBEND2END_FIXTURE_CHTTP2_FAKE_SECURITY_SRC = \
test/core/end2end/fixtures/chttp2_fake_security.c \
@@ -2150,7 +2261,11 @@ endif
libs/$(CONFIG)/libend2end_fixture_chttp2_fake_security.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBEND2END_FIXTURE_CHTTP2_FAKE_SECURITY_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_fixture_chttp2_fake_security.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_fixture_chttp2_fake_security.a $(LIBEND2END_FIXTURE_CHTTP2_FAKE_SECURITY_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_fixture_chttp2_fake_security.a
+endif
@@ -2189,7 +2304,11 @@ endif
libs/$(CONFIG)/libend2end_fixture_chttp2_fullstack.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBEND2END_FIXTURE_CHTTP2_FULLSTACK_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_fixture_chttp2_fullstack.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_fixture_chttp2_fullstack.a $(LIBEND2END_FIXTURE_CHTTP2_FULLSTACK_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_fixture_chttp2_fullstack.a
+endif
@@ -2228,7 +2347,11 @@ endif
libs/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_fullstack.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBEND2END_FIXTURE_CHTTP2_SIMPLE_SSL_FULLSTACK_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_fullstack.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_fullstack.a $(LIBEND2END_FIXTURE_CHTTP2_SIMPLE_SSL_FULLSTACK_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_fullstack.a
+endif
@@ -2267,7 +2390,11 @@ endif
libs/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBEND2END_FIXTURE_CHTTP2_SIMPLE_SSL_WITH_OAUTH2_FULLSTACK_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.a $(LIBEND2END_FIXTURE_CHTTP2_SIMPLE_SSL_WITH_OAUTH2_FULLSTACK_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.a
+endif
@@ -2306,7 +2433,11 @@ endif
libs/$(CONFIG)/libend2end_fixture_chttp2_socket_pair.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBEND2END_FIXTURE_CHTTP2_SOCKET_PAIR_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_fixture_chttp2_socket_pair.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_fixture_chttp2_socket_pair.a $(LIBEND2END_FIXTURE_CHTTP2_SOCKET_PAIR_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_fixture_chttp2_socket_pair.a
+endif
@@ -2345,7 +2476,11 @@ endif
libs/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBEND2END_FIXTURE_CHTTP2_SOCKET_PAIR_ONE_BYTE_AT_A_TIME_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time.a $(LIBEND2END_FIXTURE_CHTTP2_SOCKET_PAIR_ONE_BYTE_AT_A_TIME_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time.a
+endif
@@ -2371,7 +2506,11 @@ LIBEND2END_TEST_CANCEL_AFTER_ACCEPT_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuf
libs/$(CONFIG)/libend2end_test_cancel_after_accept.a: $(ZLIB_DEP) $(LIBEND2END_TEST_CANCEL_AFTER_ACCEPT_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_cancel_after_accept.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_cancel_after_accept.a $(LIBEND2END_TEST_CANCEL_AFTER_ACCEPT_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_cancel_after_accept.a
+endif
@@ -2393,7 +2532,11 @@ LIBEND2END_TEST_CANCEL_AFTER_ACCEPT_AND_WRITES_CLOSED_OBJS = $(addprefix objs/$(
libs/$(CONFIG)/libend2end_test_cancel_after_accept_and_writes_closed.a: $(ZLIB_DEP) $(LIBEND2END_TEST_CANCEL_AFTER_ACCEPT_AND_WRITES_CLOSED_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_cancel_after_accept_and_writes_closed.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_cancel_after_accept_and_writes_closed.a $(LIBEND2END_TEST_CANCEL_AFTER_ACCEPT_AND_WRITES_CLOSED_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_cancel_after_accept_and_writes_closed.a
+endif
@@ -2415,7 +2558,11 @@ LIBEND2END_TEST_CANCEL_AFTER_INVOKE_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuf
libs/$(CONFIG)/libend2end_test_cancel_after_invoke.a: $(ZLIB_DEP) $(LIBEND2END_TEST_CANCEL_AFTER_INVOKE_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_cancel_after_invoke.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_cancel_after_invoke.a $(LIBEND2END_TEST_CANCEL_AFTER_INVOKE_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_cancel_after_invoke.a
+endif
@@ -2437,7 +2584,11 @@ LIBEND2END_TEST_CANCEL_BEFORE_INVOKE_OBJS = $(addprefix objs/$(CONFIG)/, $(addsu
libs/$(CONFIG)/libend2end_test_cancel_before_invoke.a: $(ZLIB_DEP) $(LIBEND2END_TEST_CANCEL_BEFORE_INVOKE_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_cancel_before_invoke.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_cancel_before_invoke.a $(LIBEND2END_TEST_CANCEL_BEFORE_INVOKE_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_cancel_before_invoke.a
+endif
@@ -2459,7 +2610,11 @@ LIBEND2END_TEST_CANCEL_IN_A_VACUUM_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuff
libs/$(CONFIG)/libend2end_test_cancel_in_a_vacuum.a: $(ZLIB_DEP) $(LIBEND2END_TEST_CANCEL_IN_A_VACUUM_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_cancel_in_a_vacuum.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_cancel_in_a_vacuum.a $(LIBEND2END_TEST_CANCEL_IN_A_VACUUM_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_cancel_in_a_vacuum.a
+endif
@@ -2481,7 +2636,11 @@ LIBEND2END_TEST_CENSUS_SIMPLE_REQUEST_OBJS = $(addprefix objs/$(CONFIG)/, $(adds
libs/$(CONFIG)/libend2end_test_census_simple_request.a: $(ZLIB_DEP) $(LIBEND2END_TEST_CENSUS_SIMPLE_REQUEST_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_census_simple_request.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_census_simple_request.a $(LIBEND2END_TEST_CENSUS_SIMPLE_REQUEST_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_census_simple_request.a
+endif
@@ -2503,7 +2662,11 @@ LIBEND2END_TEST_DISAPPEARING_SERVER_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuf
libs/$(CONFIG)/libend2end_test_disappearing_server.a: $(ZLIB_DEP) $(LIBEND2END_TEST_DISAPPEARING_SERVER_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_disappearing_server.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_disappearing_server.a $(LIBEND2END_TEST_DISAPPEARING_SERVER_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_disappearing_server.a
+endif
@@ -2525,7 +2688,11 @@ LIBEND2END_TEST_EARLY_SERVER_SHUTDOWN_FINISHES_INFLIGHT_CALLS_OBJS = $(addprefix
libs/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_inflight_calls.a: $(ZLIB_DEP) $(LIBEND2END_TEST_EARLY_SERVER_SHUTDOWN_FINISHES_INFLIGHT_CALLS_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_inflight_calls.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_inflight_calls.a $(LIBEND2END_TEST_EARLY_SERVER_SHUTDOWN_FINISHES_INFLIGHT_CALLS_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_inflight_calls.a
+endif
@@ -2547,7 +2714,11 @@ LIBEND2END_TEST_EARLY_SERVER_SHUTDOWN_FINISHES_TAGS_OBJS = $(addprefix objs/$(CO
libs/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_tags.a: $(ZLIB_DEP) $(LIBEND2END_TEST_EARLY_SERVER_SHUTDOWN_FINISHES_TAGS_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_tags.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_tags.a $(LIBEND2END_TEST_EARLY_SERVER_SHUTDOWN_FINISHES_TAGS_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_tags.a
+endif
@@ -2569,7 +2740,11 @@ LIBEND2END_TEST_GRACEFUL_SERVER_SHUTDOWN_OBJS = $(addprefix objs/$(CONFIG)/, $(a
libs/$(CONFIG)/libend2end_test_graceful_server_shutdown.a: $(ZLIB_DEP) $(LIBEND2END_TEST_GRACEFUL_SERVER_SHUTDOWN_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_graceful_server_shutdown.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_graceful_server_shutdown.a $(LIBEND2END_TEST_GRACEFUL_SERVER_SHUTDOWN_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_graceful_server_shutdown.a
+endif
@@ -2591,7 +2766,11 @@ LIBEND2END_TEST_INVOKE_LARGE_REQUEST_OBJS = $(addprefix objs/$(CONFIG)/, $(addsu
libs/$(CONFIG)/libend2end_test_invoke_large_request.a: $(ZLIB_DEP) $(LIBEND2END_TEST_INVOKE_LARGE_REQUEST_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_invoke_large_request.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_invoke_large_request.a $(LIBEND2END_TEST_INVOKE_LARGE_REQUEST_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_invoke_large_request.a
+endif
@@ -2613,7 +2792,11 @@ LIBEND2END_TEST_MAX_CONCURRENT_STREAMS_OBJS = $(addprefix objs/$(CONFIG)/, $(add
libs/$(CONFIG)/libend2end_test_max_concurrent_streams.a: $(ZLIB_DEP) $(LIBEND2END_TEST_MAX_CONCURRENT_STREAMS_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_max_concurrent_streams.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_max_concurrent_streams.a $(LIBEND2END_TEST_MAX_CONCURRENT_STREAMS_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_max_concurrent_streams.a
+endif
@@ -2635,7 +2818,11 @@ LIBEND2END_TEST_NO_OP_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(base
libs/$(CONFIG)/libend2end_test_no_op.a: $(ZLIB_DEP) $(LIBEND2END_TEST_NO_OP_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_no_op.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_no_op.a $(LIBEND2END_TEST_NO_OP_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_no_op.a
+endif
@@ -2657,7 +2844,11 @@ LIBEND2END_TEST_PING_PONG_STREAMING_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuf
libs/$(CONFIG)/libend2end_test_ping_pong_streaming.a: $(ZLIB_DEP) $(LIBEND2END_TEST_PING_PONG_STREAMING_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_ping_pong_streaming.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_ping_pong_streaming.a $(LIBEND2END_TEST_PING_PONG_STREAMING_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_ping_pong_streaming.a
+endif
@@ -2679,7 +2870,11 @@ LIBEND2END_TEST_REQUEST_RESPONSE_WITH_BINARY_METADATA_AND_PAYLOAD_OBJS = $(addpr
libs/$(CONFIG)/libend2end_test_request_response_with_binary_metadata_and_payload.a: $(ZLIB_DEP) $(LIBEND2END_TEST_REQUEST_RESPONSE_WITH_BINARY_METADATA_AND_PAYLOAD_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_request_response_with_binary_metadata_and_payload.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_request_response_with_binary_metadata_and_payload.a $(LIBEND2END_TEST_REQUEST_RESPONSE_WITH_BINARY_METADATA_AND_PAYLOAD_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_request_response_with_binary_metadata_and_payload.a
+endif
@@ -2701,7 +2896,11 @@ LIBEND2END_TEST_REQUEST_RESPONSE_WITH_METADATA_AND_PAYLOAD_OBJS = $(addprefix ob
libs/$(CONFIG)/libend2end_test_request_response_with_metadata_and_payload.a: $(ZLIB_DEP) $(LIBEND2END_TEST_REQUEST_RESPONSE_WITH_METADATA_AND_PAYLOAD_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_request_response_with_metadata_and_payload.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_request_response_with_metadata_and_payload.a $(LIBEND2END_TEST_REQUEST_RESPONSE_WITH_METADATA_AND_PAYLOAD_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_request_response_with_metadata_and_payload.a
+endif
@@ -2723,7 +2922,11 @@ LIBEND2END_TEST_REQUEST_RESPONSE_WITH_PAYLOAD_OBJS = $(addprefix objs/$(CONFIG)/
libs/$(CONFIG)/libend2end_test_request_response_with_payload.a: $(ZLIB_DEP) $(LIBEND2END_TEST_REQUEST_RESPONSE_WITH_PAYLOAD_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_request_response_with_payload.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_request_response_with_payload.a $(LIBEND2END_TEST_REQUEST_RESPONSE_WITH_PAYLOAD_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_request_response_with_payload.a
+endif
@@ -2745,7 +2948,11 @@ LIBEND2END_TEST_REQUEST_RESPONSE_WITH_TRAILING_METADATA_AND_PAYLOAD_OBJS = $(add
libs/$(CONFIG)/libend2end_test_request_response_with_trailing_metadata_and_payload.a: $(ZLIB_DEP) $(LIBEND2END_TEST_REQUEST_RESPONSE_WITH_TRAILING_METADATA_AND_PAYLOAD_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_request_response_with_trailing_metadata_and_payload.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_request_response_with_trailing_metadata_and_payload.a $(LIBEND2END_TEST_REQUEST_RESPONSE_WITH_TRAILING_METADATA_AND_PAYLOAD_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_request_response_with_trailing_metadata_and_payload.a
+endif
@@ -2767,7 +2974,11 @@ LIBEND2END_TEST_SIMPLE_DELAYED_REQUEST_OBJS = $(addprefix objs/$(CONFIG)/, $(add
libs/$(CONFIG)/libend2end_test_simple_delayed_request.a: $(ZLIB_DEP) $(LIBEND2END_TEST_SIMPLE_DELAYED_REQUEST_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_simple_delayed_request.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_simple_delayed_request.a $(LIBEND2END_TEST_SIMPLE_DELAYED_REQUEST_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_simple_delayed_request.a
+endif
@@ -2789,7 +3000,11 @@ LIBEND2END_TEST_SIMPLE_REQUEST_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .
libs/$(CONFIG)/libend2end_test_simple_request.a: $(ZLIB_DEP) $(LIBEND2END_TEST_SIMPLE_REQUEST_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_simple_request.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_simple_request.a $(LIBEND2END_TEST_SIMPLE_REQUEST_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_simple_request.a
+endif
@@ -2811,7 +3026,11 @@ LIBEND2END_TEST_THREAD_STRESS_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o
libs/$(CONFIG)/libend2end_test_thread_stress.a: $(ZLIB_DEP) $(LIBEND2END_TEST_THREAD_STRESS_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_thread_stress.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_thread_stress.a $(LIBEND2END_TEST_THREAD_STRESS_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_thread_stress.a
+endif
@@ -2833,7 +3052,11 @@ LIBEND2END_TEST_WRITES_DONE_HANGS_WITH_PENDING_READ_OBJS = $(addprefix objs/$(CO
libs/$(CONFIG)/libend2end_test_writes_done_hangs_with_pending_read.a: $(ZLIB_DEP) $(LIBEND2END_TEST_WRITES_DONE_HANGS_WITH_PENDING_READ_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_test_writes_done_hangs_with_pending_read.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_test_writes_done_hangs_with_pending_read.a $(LIBEND2END_TEST_WRITES_DONE_HANGS_WITH_PENDING_READ_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_test_writes_done_hangs_with_pending_read.a
+endif
@@ -2874,7 +3097,11 @@ endif
libs/$(CONFIG)/libend2end_certs.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBEND2END_CERTS_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/libend2end_certs.a
$(Q) $(AR) rcs libs/$(CONFIG)/libend2end_certs.a $(LIBEND2END_CERTS_OBJS)
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/libend2end_certs.a
+endif
@@ -5318,6 +5545,68 @@ endif
endif
+TIPS_CLIENT_SRC = \
+ examples/tips/client_main.cc \
+
+TIPS_CLIENT_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(basename $(TIPS_CLIENT_SRC))))
+
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL with ALPN.
+
+bins/$(CONFIG)/tips_client: openssl_dep_error
+
+else
+
+bins/$(CONFIG)/tips_client: $(TIPS_CLIENT_OBJS) libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
+ $(E) "[LD] Linking $@"
+ $(Q) mkdir -p `dirname $@`
+ $(Q) $(LDXX) $(LDFLAGS) $(TIPS_CLIENT_OBJS) $(GTEST_LIB) libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS) $(LDLIBS_SECURE) -o bins/$(CONFIG)/tips_client
+
+endif
+
+objs/$(CONFIG)/examples/tips/client_main.o: libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
+
+deps_tips_client: $(TIPS_CLIENT_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(TIPS_CLIENT_OBJS:.o=.dep)
+endif
+endif
+
+
+TIPS_CLIENT_TEST_SRC = \
+ examples/tips/client_test.cc \
+
+TIPS_CLIENT_TEST_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(basename $(TIPS_CLIENT_TEST_SRC))))
+
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL with ALPN.
+
+bins/$(CONFIG)/tips_client_test: openssl_dep_error
+
+else
+
+bins/$(CONFIG)/tips_client_test: $(TIPS_CLIENT_TEST_OBJS) libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
+ $(E) "[LD] Linking $@"
+ $(Q) mkdir -p `dirname $@`
+ $(Q) $(LDXX) $(LDFLAGS) $(TIPS_CLIENT_TEST_OBJS) $(GTEST_LIB) libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS) $(LDLIBS_SECURE) -o bins/$(CONFIG)/tips_client_test
+
+endif
+
+objs/$(CONFIG)/examples/tips/client_test.o: libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
+
+deps_tips_client_test: $(TIPS_CLIENT_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(TIPS_CLIENT_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
QPS_CLIENT_SRC = \
gens/test/cpp/qps/qpstest.pb.cc \
test/cpp/qps/client.cc \
diff --git a/build.json b/build.json
index 5057bceff2..cacbfe2ae5 100644
--- a/build.json
+++ b/build.json
@@ -411,6 +411,22 @@
"test/cpp/end2end/async_test_server.cc",
"test/cpp/util/create_test_channel.cc"
]
+ },
+ {
+ "name": "tips_client_lib",
+ "build": "private",
+ "language": "c++",
+ "src": [
+ "examples/tips/label.proto",
+ "examples/tips/empty.proto",
+ "examples/tips/pubsub.proto",
+ "examples/tips/client.cc"
+ ],
+ "deps": [
+ "grpc++",
+ "grpc",
+ "gpr"
+ ]
}
],
"targets": [
@@ -1497,6 +1513,41 @@
"run": false
},
{
+ "name": "tips_client",
+ "build": "test",
+ "run": false,
+ "language": "c++",
+ "src": [
+ "examples/tips/client_main.cc"
+ ],
+ "deps": [
+ "tips_client_lib",
+ "grpc++_test_util",
+ "grpc_test_util",
+ "grpc++",
+ "grpc",
+ "gpr_test_util",
+ "gpr"
+ ]
+ },
+ {
+ "name": "tips_client_test",
+ "build": "test",
+ "language": "c++",
+ "src": [
+ "examples/tips/client_test.cc"
+ ],
+ "deps": [
+ "tips_client_lib",
+ "grpc++_test_util",
+ "grpc_test_util",
+ "grpc++",
+ "grpc",
+ "gpr_test_util",
+ "gpr"
+ ]
+ },
+ {
"name": "qps_client",
"build": "test",
"language": "c++",
diff --git a/examples/tips/client.cc b/examples/tips/client.cc
new file mode 100644
index 0000000000..695ff80e8a
--- /dev/null
+++ b/examples/tips/client.cc
@@ -0,0 +1,60 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc++/client_context.h>
+
+#include "examples/tips/client.h"
+
+using tech::pubsub::Topic;
+using tech::pubsub::PublisherService;
+
+namespace grpc {
+namespace examples {
+namespace tips {
+
+Client::Client(std::shared_ptr<ChannelInterface> channel)
+ : stub_(PublisherService::NewStub(channel)) {
+}
+
+Status Client::CreateTopic(grpc::string topic) {
+ Topic request;
+ Topic response;
+ request.set_name(topic);
+ ClientContext context;
+
+ return stub_->CreateTopic(&context, request, &response);
+}
+
+} // namespace tips
+} // namespace examples
+} // namespace grpc
diff --git a/src/node/port_picker.js b/examples/tips/client.h
index ad82f2a7f8..3f4f1fd836 100644
--- a/src/node/port_picker.js
+++ b/examples/tips/client.h
@@ -31,22 +31,24 @@
*
*/
-var net = require('net');
+#include <grpc++/channel_interface.h>
+#include <grpc++/status.h>
-/**
- * Finds a free port that a server can bind to, in the format
- * "address:port"
- * @param {function(string)} cb The callback that should execute when the port
- * is available
- */
-function nextAvailablePort(cb) {
- var server = net.createServer();
- server.listen(function() {
- var address = server.address();
- server.close(function() {
- cb(address.address + ':' + address.port.toString());
- });
- });
-}
+#include "examples/tips/pubsub.pb.h"
+
+namespace grpc {
+namespace examples {
+namespace tips {
+
+class Client {
+ public:
+ Client(std::shared_ptr<grpc::ChannelInterface> channel);
+ Status CreateTopic(grpc::string topic);
+
+ private:
+ std::unique_ptr<tech::pubsub::PublisherService::Stub> stub_;
+};
-exports.nextAvailablePort = nextAvailablePort;
+} // namespace tips
+} // namespace examples
+} // namespace grpc
diff --git a/examples/tips/client_main.cc b/examples/tips/client_main.cc
new file mode 100644
index 0000000000..17567b6f17
--- /dev/null
+++ b/examples/tips/client_main.cc
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpc/support/log.h>
+#include <google/gflags.h>
+#include <grpc++/channel_interface.h>
+#include <grpc++/create_channel.h>
+#include <grpc++/status.h>
+
+#include "examples/tips/client.h"
+#include "test/cpp/util/create_test_channel.h"
+
+DEFINE_bool(enable_ssl, true, "Whether to use ssl/tls.");
+DEFINE_bool(use_prod_roots, true, "True to use SSL roots for production GFE");
+DEFINE_int32(server_port, 0, "Server port.");
+DEFINE_string(server_host, "127.0.0.1", "Server host to connect to");
+DEFINE_string(server_host_override, "foo.test.google.com",
+ "Override the server host which is sent in HTTP header");
+
+int main(int argc, char** argv) {
+ grpc_init();
+ google::ParseCommandLineFlags(&argc, &argv, true);
+ gpr_log(GPR_INFO, "Start TIPS client");
+
+ GPR_ASSERT(FLAGS_server_port);
+ const int host_port_buf_size = 1024;
+ char host_port[host_port_buf_size];
+ snprintf(host_port, host_port_buf_size, "%s:%d", FLAGS_server_host.c_str(),
+ FLAGS_server_port);
+
+ std::shared_ptr<grpc::ChannelInterface> channel(
+ grpc::CreateTestChannel(host_port, FLAGS_server_host_override,
+ FLAGS_enable_ssl, FLAGS_use_prod_roots));
+
+ grpc::examples::tips::Client client(channel);
+ grpc::Status s = client.CreateTopic("test");
+ GPR_ASSERT(s.IsOk());
+
+ channel.reset();
+ grpc_shutdown();
+ return 0;
+}
diff --git a/examples/tips/client_test.cc b/examples/tips/client_test.cc
new file mode 100644
index 0000000000..69238f2c6f
--- /dev/null
+++ b/examples/tips/client_test.cc
@@ -0,0 +1,106 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc++/channel_arguments.h>
+#include <grpc++/channel_interface.h>
+#include <grpc++/client_context.h>
+#include <grpc++/create_channel.h>
+#include <grpc++/server.h>
+#include <grpc++/server_builder.h>
+#include <grpc++/server_context.h>
+#include <grpc++/status.h>
+#include <gtest/gtest.h>
+
+#include "examples/tips/client.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+
+using grpc::ChannelInterface;
+
+namespace grpc {
+namespace testing {
+namespace {
+
+const char kTopic[] = "test topic";
+
+class PublishServiceImpl : public tech::pubsub::PublisherService::Service {
+ public:
+ Status CreateTopic(::grpc::ServerContext* context,
+ const ::tech::pubsub::Topic* request,
+ ::tech::pubsub::Topic* response) override {
+ EXPECT_EQ(request->name(), kTopic);
+ return Status::OK;
+ }
+};
+
+class End2endTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ int port = grpc_pick_unused_port_or_die();
+ server_address_ << "localhost:" << port;
+ // Setup server
+ ServerBuilder builder;
+ builder.AddPort(server_address_.str());
+ builder.RegisterService(service_.service());
+ server_ = builder.BuildAndStart();
+
+ channel_ = CreateChannel(server_address_.str(), ChannelArguments());
+ }
+
+ void TearDown() override { server_->Shutdown(); }
+
+ std::unique_ptr<Server> server_;
+ std::ostringstream server_address_;
+ PublishServiceImpl service_;
+
+ std::shared_ptr<ChannelInterface> channel_;
+};
+
+TEST_F(End2endTest, CreateTopic) {
+ grpc::examples::tips::Client client(channel_);
+ client.CreateTopic(kTopic);
+}
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc_test_init(argc, argv);
+ grpc_init();
+ ::testing::InitGoogleTest(&argc, argv);
+ gpr_log(GPR_INFO, "Start test ...");
+ int result = RUN_ALL_TESTS();
+ grpc_shutdown();
+ return result;
+}
diff --git a/examples/tips/empty.proto b/examples/tips/empty.proto
new file mode 100644
index 0000000000..adf66b5e61
--- /dev/null
+++ b/examples/tips/empty.proto
@@ -0,0 +1,13 @@
+syntax = "proto2";
+
+package proto2;
+
+// An empty message that you can re-use to avoid defining duplicated empty
+// messages in your project. A typical example is to use it as the argument or
+// the return value of a service API. For instance:
+//
+// service Foo {
+// rpc Bar (proto2.Empty) returns (proto2.Empty) { };
+// };
+//
+message Empty {}
diff --git a/examples/tips/label.proto b/examples/tips/label.proto
new file mode 100644
index 0000000000..e93ac9dea3
--- /dev/null
+++ b/examples/tips/label.proto
@@ -0,0 +1,48 @@
+// Labels provide a way to associate user-defined metadata with various
+// objects. Labels may be used to organize objects into non-hierarchical
+// groups; think metadata tags attached to mp3s.
+
+syntax = "proto2";
+
+package tech.label;
+
+// A key-value pair applied to a given object.
+message Label {
+ // The key of a label is a syntactically valid URL (as per RFC 1738) with
+ // the "scheme" and initial slashes omitted and with the additional
+ // restrictions noted below. Each key should be globally unique. The
+ // "host" portion is called the "namespace" and is not necessarily
+ // resolvable to a network endpoint. Instead, the namespace indicates what
+ // system or entity defines the semantics of the label. Namespaces do not
+ // restrict the set of objects to which a label may be associated.
+ //
+ // Keys are defined by the following grammar:
+ //
+ // key = hostname "/" kpath
+ // kpath = ksegment *[ "/" ksegment ]
+ // ksegment = alphadigit | *[ alphadigit | "-" | "_" | "." ]
+ //
+ // where "hostname" and "alphadigit" are defined as in RFC 1738.
+ //
+ // Example key:
+ // spanner.google.com/universe
+ required string key = 1;
+
+ // The value of the label.
+ oneof value {
+ // A string value.
+ string str_value = 2;
+ // An integer value.
+ int64 num_value = 3;
+ }
+}
+
+// A collection of labels, such as the set of all labels attached to an
+// object. Each label in the set must have a different key.
+//
+// Users should prefer to embed "repeated Label" directly when possible.
+// This message should only be used in cases where that isn't possible (e.g.
+// with oneof).
+message Labels {
+ repeated Label label = 1;
+}
diff --git a/examples/tips/pubsub.proto b/examples/tips/pubsub.proto
new file mode 100644
index 0000000000..0b3bd5d012
--- /dev/null
+++ b/examples/tips/pubsub.proto
@@ -0,0 +1,702 @@
+// Specification of the Pubsub API.
+
+syntax = "proto2";
+
+import "examples/tips/empty.proto";
+import "examples/tips/label.proto";
+
+package tech.pubsub;
+
+// -----------------------------------------------------------------------------
+// Overview of the Pubsub API
+// -----------------------------------------------------------------------------
+
+// This file describes an API for a Pubsub system. This system provides a
+// reliable many-to-many communication mechanism between independently written
+// publishers and subscribers where the publisher publishes messages to "topics"
+// and each subscriber creates a "subscription" and consumes messages from it.
+//
+// (a) The pubsub system maintains bindings between topics and subscriptions.
+// (b) A publisher publishes messages into a topic.
+// (c) The pubsub system delivers messages from topics into relevant
+// subscriptions.
+// (d) A subscriber receives pending messages from its subscription and
+// acknowledges or nacks each one to the pubsub system.
+// (e) The pubsub system removes acknowledged messages from that subscription.
+
+// -----------------------------------------------------------------------------
+// Data Model
+// -----------------------------------------------------------------------------
+
+// The data model consists of the following:
+//
+// * Topic: A topic is a resource to which messages are published by publishers.
+// Topics are named, and the name of the topic is unique within the pubsub
+// system.
+//
+// * Subscription: A subscription records the subscriber's interest in a topic.
+// It can optionally include a query to select a subset of interesting
+// messages. The pubsub system maintains a logical cursor tracking the
+// matching messages which still need to be delivered and acked so that
+// they can be retried as needed. The set of messages that have not been
+// acknowledged is called the subscription backlog.
+//
+// * Message: A message is a unit of data that flows in the system. It contains
+// opaque data from the publisher along with its labels.
+//
+// * Message Labels (optional): A set of opaque key, value pairs assigned
+// by the publisher which the subscriber can use for filtering out messages
+// in the topic. For example, a label with key "foo.com/device_type" and
+// value "mobile" may be added for messages that are only relevant for a
+// mobile subscriber; a subscriber on a phone may decide to create a
+// subscription only for messages that have this label.
+
+// -----------------------------------------------------------------------------
+// Publisher Flow
+// -----------------------------------------------------------------------------
+
+// A publisher publishes messages to the topic using the Publish request:
+//
+// PubsubMessage message;
+// message.set_data("....");
+// Label label;
+// label.set_key("foo.com/key1");
+// label.set_str_value("value1");
+// message.add_label(label);
+// PublishRequest request;
+// request.set_topic("topicName");
+// request.set_message(message);
+// PublisherService.Publish(request);
+
+// -----------------------------------------------------------------------------
+// Subscriber Flow
+// -----------------------------------------------------------------------------
+
+// The subscriber part of the API is richer than the publisher part and has a
+// number of concepts w.r.t. subscription creation and monitoring:
+//
+// (1) A subscriber creates a subscription using the CreateSubscription call.
+// It may specify an optional "query" to indicate that it wants to receive
+// only messages with a certain set of labels using the label query syntax.
+// It may also specify an optional truncation policy to indicate when old
+// messages from the subscription can be removed.
+//
+// (2) A subscriber receives messages in one of two ways: via push or pull.
+//
+// (a) To receive messages via push, the PushConfig field must be specified in
+// the Subscription parameter when creating a subscription. The PushConfig
+// specifies an endpoint at which the subscriber must expose the
+// PushEndpointService. Messages are received via the HandlePubsubEvent
+// method. The push subscriber responds to the HandlePubsubEvent method
+// with a result code that indicates one of three things: Ack (the message
+// has been successfully processed and the Pubsub system may delete it),
+// Nack (the message has been rejected, the Pubsub system should resend it
+// at a later time), or Push-Back (this is a Nack with the additional
+// semantics that the subscriber is overloaded and the pubsub system should
+// back off on the rate at which it is invoking HandlePubsubEvent). The
+// endpoint may be a load balancer for better scalability.
+//
+// (b) To receive messages via pull, a subscriber calls the Pull method on the
+// SubscriberService to get messages from the subscription. For each
+// individual message, the subscriber may use the ack_id received in the
+// PullResponse to Ack the message, Nack the message, or modify the ack
+// deadline with ModifyAckDeadline. See the
+// Subscription.ack_deadline_seconds field documentation for details on the
+// ack deadline behavior.
+//
+// Note: Messages may be consumed in parallel by multiple subscribers making
+// Pull calls to the same subscription; this will result in the set of
+// messages from the subscription being shared and each subscriber
+// receiving a subset of the messages.
+//
+// (3) The subscriber can explicitly truncate the current subscription.
+//
+// (4) "Truncated" events are delivered when a subscription is
+// truncated, whether due to the subscription's truncation policy
+// or an explicit request from the subscriber.
+//
+// Subscription creation:
+//
+// Subscription subscription;
+// subscription.set_topic("topicName");
+// subscription.set_name("subscriptionName");
+// subscription.push_config().set_push_endpoint("machinename:8888");
+// SubscriberService.CreateSubscription(subscription);
+//
+// Consuming messages via push:
+//
+// TODO(eschapira): Add HTTP push example.
+//
+// The port 'machinename:8888' must be bound to a stubby server that implements
+// the PushEndpointService with the following method:
+//
+// int HandlePubsubEvent(PubsubEvent event) {
+// if (event.subscription().equals("subscriptionName")) {
+// if (event.has_message()) {
+// Process(event.message().data());
+// } else if (event.truncated()) {
+// ProcessTruncatedEvent();
+// }
+// }
+// return OK; // This return code implies an acknowledgment
+// }
+//
+// Consuming messages via pull:
+//
+// The subscription must be created without setting the push_config field.
+//
+// PullRequest pull_request;
+// pull_request.set_subscription("subscriptionName");
+// pull_request.set_return_immediately(false);
+// while (true) {
+// PullResponse pull_response;
+// if (SubscriberService.Pull(pull_request, pull_response) == OK) {
+// PubsubEvent event = pull_response.pubsub_event();
+// if (event.has_message()) {
+// Process(event.message().data());
+// } else if (event.truncated()) {
+// ProcessTruncatedEvent();
+// }
+// AcknowledgeRequest ack_request;
+// ack_request.set_subscription("subscriptionName");
+// ack_request.set_ack_id(pull_response.ack_id());
+// SubscriberService.Acknowledge(ack_request);
+// }
+// }
+
+// -----------------------------------------------------------------------------
+// Reliability Semantics
+// -----------------------------------------------------------------------------
+
+// When a subscriber successfully creates a subscription using
+// Subscriber.CreateSubscription, it establishes a "subscription point" with
+// respect to that subscription - the subscriber is guaranteed to receive any
+// message published after this subscription point that matches the
+// subscription's query. Note that messages published before the subscription
+// point may or may not be delivered.
+//
+// If the system truncates the subscription according to the specified
+// truncation policy, the system delivers a subscription status event with the
+// "truncated" field set to true. We refer to such events as "truncation
+// events". A truncation event:
+//
+// * Informs the subscriber that part of the subscription messages have been
+// discarded. The subscriber may want to recover from the message loss, e.g.,
+// by resyncing its state with its backend.
+// * Establishes a new subscription point, i.e., the subscriber is guaranteed to
+// receive all changes published after the truncation event is received (or
+// until another truncation event is received).
+//
+// Note that messages are not delivered in any particular order by the pubsub
+// system. Furthermore, the system guarantees at-least-once delivery
+// of each message or truncation event until it is acked.
+
+// -----------------------------------------------------------------------------
+// Deletion
+// -----------------------------------------------------------------------------
+
+// Both topics and subscriptions may be deleted. Deletion of a topic implies
+// deletion of all attached subscriptions.
+//
+// When a subscription is deleted directly by calling DeleteSubscription, all
+// messages are immediately dropped. If it is a pull subscriber, future pull
+// requests will return NOT_FOUND.
+//
+// When a topic is deleted all corresponding subscriptions are immediately
+// deleted, and subscribers experience the same behavior as directly deleting
+// the subscription.
+
+// -----------------------------------------------------------------------------
+// The Publisher service and its protos.
+// -----------------------------------------------------------------------------
+
+// The service that an application uses to manipulate topics, and to send
+// messages to a topic.
+service PublisherService {
+
+ // Creates the given topic with the given name.
+ rpc CreateTopic(Topic) returns (Topic) {
+ }
+
+ // Adds a message to the topic. Returns NOT_FOUND if the topic does not
+ // exist.
+ // (-- For different error code values returned via Stubby, see
+ // util/task/codes.proto. --)
+ rpc Publish(PublishRequest) returns (proto2.Empty) {
+ }
+
+ // Adds one or more messages to the topic. Returns NOT_FOUND if the topic does
+ // not exist.
+ rpc PublishBatch(PublishBatchRequest) returns (PublishBatchResponse) {
+ }
+
+ // Gets the configuration of a topic. Since the topic only has the name
+ // attribute, this method is only useful to check the existence of a topic.
+ // If other attributes are added in the future, they will be returned here.
+ rpc GetTopic(GetTopicRequest) returns (Topic) {
+ }
+
+ // Lists matching topics.
+ rpc ListTopics(ListTopicsRequest) returns (ListTopicsResponse) {
+ }
+
+ // Deletes the topic with the given name. All subscriptions to this topic
+ // are also deleted. Returns NOT_FOUND if the topic does not exist.
+ // After a topic is deleted, a new topic may be created with the same name.
+ rpc DeleteTopic(DeleteTopicRequest) returns (proto2.Empty) {
+ }
+}
+
+// A topic resource.
+message Topic {
+ // Name of the topic.
+ optional string name = 1;
+}
+
+// A message data and its labels.
+message PubsubMessage {
+ // The message payload.
+ optional bytes data = 1;
+
+ // Optional list of labels for this message. Keys in this collection must
+ // be unique.
+ //(-- TODO(eschapira): Define how key namespace may be scoped to the topic.--)
+ repeated tech.label.Label label = 2;
+
+ // ID of this message assigned by the server at publication time. Guaranteed
+ // to be unique within the topic. This value may be read by a subscriber
+ // that receives a PubsubMessage via a Pull call or a push delivery. It must
+ // not be populated by a publisher in a Publish call.
+ optional string message_id = 3;
+}
+
+// Request for the GetTopic method.
+message GetTopicRequest {
+ // The name of the topic to get.
+ optional string topic = 1;
+}
+
+// Request for the Publish method.
+message PublishRequest {
+ // The message in the request will be published on this topic.
+ optional string topic = 1;
+
+ // The message to publish.
+ optional PubsubMessage message = 2;
+}
+
+// Request for the PublishBatch method.
+message PublishBatchRequest {
+ // The messages in the request will be published on this topic.
+ optional string topic = 1;
+
+ // The messages to publish.
+ repeated PubsubMessage messages = 2;
+}
+
+// Response for the PublishBatch method.
+message PublishBatchResponse {
+ // The server-assigned ID of each published message, in the same order as
+ // the messages in the request. IDs are guaranteed to be unique within
+ // the topic.
+ repeated string message_ids = 1;
+}
+
+// Request for the ListTopics method.
+message ListTopicsRequest {
+ // A valid label query expression.
+ // (-- Which labels are required or supported is implementation-specific. --)
+ optional string query = 1;
+
+ // Maximum number of topics to return.
+ // (-- If not specified or <= 0, the implementation will select a reasonable
+ // value. --)
+ optional int32 max_results = 2;
+
+ // The value obtained in the last <code>ListTopicsResponse</code>
+ // for continuation.
+ optional string page_token = 3;
+
+}
+
+// Response for the ListTopics method.
+message ListTopicsResponse {
+ // The resulting topics.
+ repeated Topic topic = 1;
+
+ // If not empty, indicates that there are more topics that match the request,
+ // and this value should be passed to the next <code>ListTopicsRequest</code>
+ // to continue.
+ optional string next_page_token = 2;
+}
+
+// Request for the Delete method.
+message DeleteTopicRequest {
+ // Name of the topic to delete.
+ optional string topic = 1;
+}
+
+// -----------------------------------------------------------------------------
+// The Subscriber service and its protos.
+// -----------------------------------------------------------------------------
+
+// The service that an application uses to manipulate subscriptions and to
+// consume messages from a subscription via the pull method.
+service SubscriberService {
+
+ // Creates a subscription on a given topic for a given subscriber.
+ // If the subscription already exists, returns ALREADY_EXISTS.
+ // If the corresponding topic doesn't exist, returns NOT_FOUND.
+ //
+ // If the name is not provided in the request, the server will assign a random
+ // name for this subscription on the same project as the topic.
+ rpc CreateSubscription(Subscription) returns (Subscription) {
+ }
+
+ // Gets the configuration details of a subscription.
+ rpc GetSubscription(GetSubscriptionRequest) returns (Subscription) {
+ }
+
+ // Lists matching subscriptions.
+ rpc ListSubscriptions(ListSubscriptionsRequest)
+ returns (ListSubscriptionsResponse) {
+ }
+
+ // Deletes an existing subscription. All pending messages in the subscription
+ // are immediately dropped. Calls to Pull after deletion will return
+ // NOT_FOUND.
+ rpc DeleteSubscription(DeleteSubscriptionRequest) returns (proto2.Empty) {
+ }
+
+ // Removes all the pending messages in the subscription and releases the
+ // storage associated with them. Results in a truncation event being sent to
+ // the subscriber. Messages added after this call returns are stored in the
+ // subscription as before.
+ rpc TruncateSubscription(TruncateSubscriptionRequest) returns (proto2.Empty) {
+ }
+
+ //
+ // Push subscriber calls.
+ //
+
+ // Modifies the <code>PushConfig</code> for a specified subscription.
+ // This method can be used to suspend the flow of messages to an endpoint
+ // by clearing the <code>PushConfig</code> field in the request. Messages
+ // will be accumulated for delivery even if no push configuration is
+ // defined or while the configuration is modified.
+ rpc ModifyPushConfig(ModifyPushConfigRequest) returns (proto2.Empty) {
+ }
+
+ //
+ // Pull Subscriber calls
+ //
+
+ // Pulls a single message from the server.
+ // If return_immediately is true, and no messages are available in the
+ // subscription, this method returns FAILED_PRECONDITION. The system is free
+ // to return an UNAVAILABLE error if no messages are available in a
+ // reasonable amount of time (to reduce system load).
+ rpc Pull(PullRequest) returns (PullResponse) {
+ }
+
+ // Pulls messages from the server. Returns an empty list if there are no
+ // messages available in the backlog. The system is free to return UNAVAILABLE
+ // if there are too many pull requests outstanding for the given subscription.
+ rpc PullBatch(PullBatchRequest) returns (PullBatchResponse) {
+ }
+
+ // Modifies the Ack deadline for a message received from a pull request.
+ rpc ModifyAckDeadline(ModifyAckDeadlineRequest) returns (proto2.Empty) {
+ }
+
+ // Acknowledges a particular received message: the Pub/Sub system can remove
+ // the given message from the subscription. Acknowledging a message whose
+ // Ack deadline has expired may succeed, but the message could have been
+ // already redelivered. Acknowledging a message more than once will not
+ // result in an error. This is only used for messages received via pull.
+ rpc Acknowledge(AcknowledgeRequest) returns (proto2.Empty) {
+ }
+
+ // Refuses processing a particular received message. The system will
+ // redeliver this message to some consumer of the subscription at some
+ // future time. This is only used for messages received via pull.
+ rpc Nack(NackRequest) returns (proto2.Empty) {
+ }
+}
+
+// A subscription resource.
+message Subscription {
+ // Name of the subscription.
+ optional string name = 1;
+
+ // The name of the topic from which this subscription is receiving messages.
+ optional string topic = 2;
+
+ // If <code>query</code> is non-empty, only messages on the subscriber's
+ // topic whose labels match the query will be returned. Otherwise all
+ // messages on the topic will be returned.
+ // (-- The query syntax is described in tech/label/proto/label_query.proto --)
+ optional string query = 3;
+
+ // The subscriber may specify requirements for truncating unacknowledged
+ // subscription entries. The system will honor the
+ // <code>CreateSubscription</code> request only if it can meet these
+ // requirements. If this field is not specified, messages are never truncated
+ // by the system.
+ optional TruncationPolicy truncation_policy = 4;
+
+ // Specifies which messages can be truncated by the system.
+ message TruncationPolicy {
+ oneof policy {
+ // If <code>max_bytes</code> is specified, the system is allowed to drop
+ // old messages to keep the combined size of stored messages under
+ // <code>max_bytes</code>. This is a hint; the system may keep more than
+ // this many bytes, but will make a best effort to keep the size from
+ // growing much beyond this parameter.
+ int64 max_bytes = 1;
+
+ // If <code>max_age_seconds</code> is specified, the system is allowed to
+ // drop messages that have been stored for at least this many seconds.
+ // This is a hint; the system may keep these messages, but will make a
+ // best effort to remove them when their maximum age is reached.
+ int64 max_age_seconds = 2;
+ }
+ }
+
+ // If push delivery is used with this subscription, this field is
+ // used to configure it.
+ optional PushConfig push_config = 5;
+
+ // For either push or pull delivery, the value is the maximum time after a
+ // subscriber receives a message before the subscriber should acknowledge or
+ // Nack the message. If the Ack deadline for a message passes without an
+ // Ack or a Nack, the Pub/Sub system will eventually redeliver the message.
+ // If a subscriber acknowledges after the deadline, the Pub/Sub system may
+ // accept the Ack, but it is possible that the message has been already
+ // delivered again. Multiple Acks to the message are allowed and will
+ // succeed.
+ //
+ // For push delivery, this value is used to set the request timeout for
+ // the call to the push endpoint.
+ //
+ // For pull delivery, this value is used as the initial value for the Ack
+ // deadline. It may be overridden for a specific pull request (message) with
+ // <code>ModifyAckDeadline</code>.
+ // While a message is outstanding (i.e. it has been delivered to a pull
+ // subscriber and the subscriber has not yet Acked or Nacked), the Pub/Sub
+ // system will not deliver that message to another pull subscriber
+ // (on a best-effort basis).
+ optional int32 ack_deadline_seconds = 6;
+
+ // If this parameter is set to n, the system is allowed to (but not required
+ // to) delete the subscription when at least n seconds have elapsed since the
+ // client presence was detected. (Presence is detected through any
+ // interaction using the subscription ID, including Pull(), Get(), or
+ // acknowledging a message.)
+ //
+ // If this parameter is not set, the subscription will stay live until
+ // explicitly deleted.
+ //
+ // Clients can detect such garbage collection when a Get call or a Pull call
+ // (for pull subscribers only) returns NOT_FOUND.
+ optional int64 garbage_collect_seconds = 7;
+}
+
+// Configuration for a push delivery endpoint.
+message PushConfig {
+ // A URL locating the endpoint to which messages should be pushed.
+ // For example, a Webhook endpoint might use "https://example.com/push".
+ // (-- An Android application might use "gcm:<REGID>", where <REGID> is a
+ // GCM registration id allocated for pushing messages to the application. --)
+ optional string push_endpoint = 1;
+}
+
+// An event indicating a received message or truncation event.
+message PubsubEvent {
+ // The subscription that received the event.
+ optional string subscription = 1;
+
+ oneof type {
+ // A received message.
+ PubsubMessage message = 2;
+
+ // Indicates that this subscription has been truncated.
+ bool truncated = 3;
+
+ // Indicates that this subscription has been deleted. (Note that pull
+ // subscribers will always receive NOT_FOUND in response in their pull
+ // request on the subscription, rather than seeing this boolean.)
+ bool deleted = 4;
+ }
+}
+
+// Request for the GetSubscription method.
+message GetSubscriptionRequest {
+ // The name of the subscription to get.
+ optional string subscription = 1;
+}
+
+// Request for the ListSubscriptions method.
+message ListSubscriptionsRequest {
+ // A valid label query expression.
+ // (-- Which labels are required or supported is implementation-specific.
+ // TODO(eschapira): This method must support querying by topic. We must
+ // define the key URI for the "topic" label. --)
+ optional string query = 1;
+
+ // Maximum number of subscriptions to return.
+ // (-- If not specified or <= 0, the implementation will select a reasonable
+ // value. --)
+ optional int32 max_results = 3;
+
+ // The value obtained in the last <code>ListSubscriptionsResponse</code>
+ // for continuation.
+ optional string page_token = 4;
+}
+
+// Response for the ListSubscriptions method.
+message ListSubscriptionsResponse {
+ // The subscriptions that match the request.
+ repeated Subscription subscription = 1;
+
+ // If not empty, indicates that there are more subscriptions that match the
+ // request and this value should be passed to the next
+ // <code>ListSubscriptionsRequest</code> to continue.
+ optional string next_page_token = 2;
+}
+
+// Request for the TruncateSubscription method.
+message TruncateSubscriptionRequest {
+ // The subscription that is being truncated.
+ optional string subscription = 1;
+}
+
+// Request for the DeleteSubscription method.
+message DeleteSubscriptionRequest {
+ // The subscription to delete.
+ optional string subscription = 1;
+}
+
+// Request for the ModifyPushConfig method.
+message ModifyPushConfigRequest {
+ // The name of the subscription.
+ optional string subscription = 1;
+
+ // An empty <code>push_config</code> indicates that the Pub/Sub system should
+ // pause pushing messages from the given subscription.
+ optional PushConfig push_config = 2;
+}
+
+// -----------------------------------------------------------------------------
+// The protos used by a pull subscriber.
+// -----------------------------------------------------------------------------
+
+// Request for the Pull method.
+message PullRequest {
+ // The subscription from which a message should be pulled.
+ optional string subscription = 1;
+
+ // If this is specified as true the system will respond immediately even if
+ // it is not able to return a message in the Pull response. Otherwise the
+ // system is allowed to wait until at least one message is available rather
+ // than returning FAILED_PRECONDITION. The client may cancel the request if
+ // it does not wish to wait any longer for the response.
+ optional bool return_immediately = 2;
+}
+
+// Either a <code>PubsubMessage</code> or a truncation event. One of these two
+// must be populated.
+message PullResponse {
+ // This ID must be used to acknowledge the received event or message.
+ optional string ack_id = 1;
+
+ // A pubsub message or truncation event.
+ optional PubsubEvent pubsub_event = 2;
+}
+
+// Request for the PullBatch method.
+message PullBatchRequest {
+ // The subscription from which messages should be pulled.
+ optional string subscription = 1;
+
+ // If this is specified as true the system will respond immediately even if
+ // it is not able to return a message in the Pull response. Otherwise the
+ // system is allowed to wait until at least one message is available rather
+ // than returning no messages. The client may cancel the request if it does
+ // not wish to wait any longer for the response.
+ optional bool return_immediately = 2;
+
+ // The maximum number of PubsubEvents returned for this request. The Pub/Sub
+ // system may return fewer than the number of events specified.
+ optional int32 max_events = 3;
+}
+
+// Response for the PullBatch method.
+message PullBatchResponse {
+
+ // Received Pub/Sub messages or status events. The Pub/Sub system will return
+ // zero messages if there are no more messages available in the backlog. The
+ // Pub/Sub system may return fewer than the max_events requested even if
+ // there are more messages available in the backlog.
+ repeated PullResponse pull_responses = 2;
+}
+
+// Request for the ModifyAckDeadline method.
+message ModifyAckDeadlineRequest {
+ // The name of the subscription from which messages are being pulled.
+ optional string subscription = 1;
+
+ // The acknowledgment ID.
+ optional string ack_id = 2;
+
+ // The new Ack deadline. Must be >= 0.
+ optional int32 ack_deadline_seconds = 3;
+}
+
+// Request for the Acknowledge method.
+message AcknowledgeRequest {
+ // The subscription whose message is being acknowledged.
+ optional string subscription = 1;
+
+ // The acknowledgment ID for the message being acknowledged. This was
+ // returned by the Pub/Sub system in the Pull response.
+ repeated string ack_id = 2;
+}
+
+// Request for the Nack method.
+message NackRequest {
+ // The subscription whose message is being Nacked.
+ optional string subscription = 1;
+
+ // The acknowledgment ID for the message being refused. This was returned by
+ // the Pub/Sub system in the Pull response.
+ repeated string ack_id = 2;
+}
+
+// -----------------------------------------------------------------------------
+// The service and protos used by a push subscriber.
+// -----------------------------------------------------------------------------
+
+// The service that a subscriber uses to handle messages sent via push
+// delivery.
+// This service is not currently exported for HTTP clients.
+// TODO(eschapira): Explain HTTP subscribers.
+service PushEndpointService {
+ // Sends a <code>PubsubMessage</code> or a subscription status event to a
+ // push endpoint.
+ // The push endpoint responds with an empty message and a code from
+ // util/task/codes.proto. The following codes have a particular meaning to the
+ // Pub/Sub system:
+ // OK - This is interpreted by Pub/Sub as Ack.
+ // ABORTED - This is interpreted by Pub/Sub as a Nack, without implying
+ // pushback for congestion control. The Pub/Sub system will
+ // retry this message at a later time.
+ // UNAVAILABLE - This is interpreted by Pub/Sub as a Nack, with the additional
+ // semantics of push-back. The Pub/Sub system will use an AIMD
+ // congestion control algorithm to back off the rate of sending
+ // messages from this subscription.
+ // Any other code, or a failure to respond, will be interpreted in the same
+ // way as ABORTED; i.e. the system will retry the message at a later time to
+ // ensure reliable delivery.
+ rpc HandlePubsubEvent(PubsubEvent) returns (proto2.Empty);
+}
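The Publisher Flow section above sketches a Publish call only in pseudo-code. A minimal C++ sketch (assumed, not part of this change) of how the examples/tips Client could gain a Publish method alongside CreateTopic follows; the generated class names are inferred from the packages declared in pubsub.proto and empty.proto, and a matching declaration would be needed in examples/tips/client.h.

// Hypothetical addition to examples/tips/client.cc (not in this diff).
#include <grpc++/client_context.h>

#include "examples/tips/client.h"

namespace grpc {
namespace examples {
namespace tips {

Status Client::Publish(grpc::string topic, grpc::string data) {
  tech::pubsub::PublishRequest request;
  proto2::Empty response;  // generated from examples/tips/empty.proto
  request.set_topic(topic);
  request.mutable_message()->set_data(data);
  ClientContext context;
  // Synchronous unary call, mirroring Client::CreateTopic above.
  return stub_->Publish(&context, request, &response);
}

}  // namespace tips
}  // namespace examples
}  // namespace grpc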
diff --git a/include/grpc++/server_credentials.h b/include/grpc++/server_credentials.h
index cf08870f42..91504ae0ae 100644
--- a/include/grpc++/server_credentials.h
+++ b/include/grpc++/server_credentials.h
@@ -35,6 +35,7 @@
#define __GRPCPP_SERVER_CREDENTIALS_H_
#include <memory>
+#include <vector>
#include <grpc++/config.h>
@@ -60,9 +61,12 @@ class ServerCredentials final {
// Options to create ServerCredentials with SSL
struct SslServerCredentialsOptions {
+ struct PemKeyCertPair {
+ grpc::string private_key;
+ grpc::string cert_chain;
+ };
grpc::string pem_root_certs;
- grpc::string pem_private_key;
- grpc::string pem_cert_chain;
+ std::vector<PemKeyCertPair> pem_key_cert_pairs;
};
// Factory for building different types of ServerCredentials
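A minimal usage sketch of the reworked options struct (assumed, not part of this change; the nesting follows the hunk context above, the PEM strings are placeholders, and the SSL factory that consumes the struct is elided since it is not shown in this diff):

// Assumed usage sketch: one key/cert pair, client certificates not required.
grpc::ServerCredentials::SslServerCredentialsOptions ssl_opts;
ssl_opts.pem_root_certs = "";  // empty: do not ask clients for certificates
grpc::ServerCredentials::SslServerCredentialsOptions::PemKeyCertPair pair;
pair.private_key = server_key_pem;   // placeholder PEM string
pair.cert_chain = server_cert_pem;   // placeholder PEM string
ssl_opts.pem_key_cert_pairs.push_back(pair);
// ssl_opts is then handed to the SSL ServerCredentials factory (elided here).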
diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h
index 3c5b0de195..f03f61d84e 100644
--- a/include/grpc/grpc.h
+++ b/include/grpc/grpc.h
@@ -428,7 +428,8 @@ grpc_server *grpc_server_create(grpc_completion_queue *cq,
REQUIRES: server not started */
int grpc_server_add_http2_port(grpc_server *server, const char *addr);
-/* Add a secure port to server; returns 1 on success, 0 on failure
+/* Add a secure port to server.
+ Returns bound port number on success, 0 on failure.
REQUIRES: server not started */
int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr);
diff --git a/include/grpc/grpc_security.h b/include/grpc/grpc_security.h
index 644b31f763..0732a8f83a 100644
--- a/include/grpc/grpc_security.h
+++ b/include/grpc/grpc_security.h
@@ -54,22 +54,26 @@ void grpc_credentials_release(grpc_credentials *creds);
/* Creates default credentials. */
grpc_credentials *grpc_default_credentials_create(void);
+/* Object that holds a private key / certificate chain pair in PEM format. */
+typedef struct {
+ /* private_key is the NULL-terminated string containing the PEM encoding of
+ the client's private key. */
+ const char *private_key;
+
+ /* cert_chain is the NULL-terminated string containing the PEM encoding of
+ the client's certificate chain. */
+ const char *cert_chain;
+} grpc_ssl_pem_key_cert_pair;
+
/* Creates an SSL credentials object.
- - pem_roots_cert is the buffer containing the PEM encoding of the server
- root certificates. This parameter cannot be NULL.
- - pem_roots_cert_size is the size of the associated buffer.
- - pem_private_key is the buffer containing the PEM encoding of the client's
- private key. This parameter can be NULL if the client does not have a
- private key.
- - pem_private_key_size is the size of the associated buffer.
- - pem_cert_chain is the buffer containing the PEM encoding of the client's
- certificate chain. This parameter can be NULL if the client does not have
- a certificate chain.
- - pem_cert_chain_size is the size of the associated buffer. */
+ - pem_roots_cert is the NULL-terminated string containing the PEM encoding
+ of the server root certificates. If this parameter is NULL, the default
+ roots will be used.
+ - pem_key_cert_pair is a pointer to the object containing the client's private
+ key and certificate chain. This parameter can be NULL if the client does
+ not have such a key/cert pair. */
grpc_credentials *grpc_ssl_credentials_create(
- const unsigned char *pem_root_certs, size_t pem_root_certs_size,
- const unsigned char *pem_private_key, size_t pem_private_key_size,
- const unsigned char *pem_cert_chain, size_t pem_cert_chain_size);
+ const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair);
/* Creates a composite credentials object. */
grpc_credentials *grpc_composite_credentials_create(grpc_credentials *creds1,
@@ -130,22 +134,16 @@ typedef struct grpc_server_credentials grpc_server_credentials;
void grpc_server_credentials_release(grpc_server_credentials *creds);
/* Creates an SSL server_credentials object.
- TODO(jboeuf): Change the constructor so that it can support multiple
- key/cert pairs.
- - pem_roots_cert is the buffer containing the PEM encoding of the server
- root certificates. This parameter may be NULL if the server does not want
- the client to be authenticated with SSL.
- - pem_roots_cert_size is the size of the associated buffer.
- - pem_private_key is the buffer containing the PEM encoding of the client's
- private key. This parameter cannot be NULL.
- - pem_private_key_size is the size of the associated buffer.
- - pem_cert_chain is the buffer containing the PEM encoding of the client's
- certificate chain. This parameter cannot be NULL.
- - pem_cert_chain_size is the size of the associated buffer. */
+ - pem_roots_cert is the NULL-terminated string containing the PEM encoding of
+ the client root certificates. This parameter may be NULL if the server does
+ not want the client to be authenticated with SSL.
+ - pem_key_cert_pairs is an array of private key / certificate chain pairs of
+ the server. This parameter cannot be NULL.
+ - num_key_cert_pairs indicates the number of items in the pem_key_cert_pairs
+ array. It should be at least 1. */
grpc_server_credentials *grpc_ssl_server_credentials_create(
- const unsigned char *pem_root_certs, size_t pem_root_certs_size,
- const unsigned char *pem_private_key, size_t pem_private_key_size,
- const unsigned char *pem_cert_chain, size_t pem_cert_chain_size);
+ const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+ size_t num_key_cert_pairs);
/* Creates a fake server transport security credentials object for testing. */
grpc_server_credentials *grpc_fake_transport_security_server_credentials_create(
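A minimal sketch of the revised C-level credentials API (assumed usage, not part of this change; the PEM strings are placeholders and error handling is elided):

#include <stddef.h>

#include <grpc/grpc_security.h>

void ssl_credentials_sketch(const char *roots_pem, const char *key_pem,
                            const char *chain_pem) {
  grpc_ssl_pem_key_cert_pair pair = {key_pem, chain_pem};

  /* Client side: passing NULL for the pair means the client presents no
     certificate; NULL roots would select the default roots (not yet
     implemented per the TODO in credentials.c). */
  grpc_credentials *creds = grpc_ssl_credentials_create(roots_pem, &pair);

  /* Server side: one key/cert pair, client authentication not requested. */
  grpc_server_credentials *server_creds =
      grpc_ssl_server_credentials_create(NULL, &pair, 1);

  grpc_credentials_release(creds);
  grpc_server_credentials_release(server_creds);
}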
diff --git a/src/core/iomgr/pollset_kick_posix.c b/src/core/iomgr/pollset_kick_posix.c
index a04a6b430a..4386cf5a46 100644
--- a/src/core/iomgr/pollset_kick_posix.c
+++ b/src/core/iomgr/pollset_kick_posix.c
@@ -47,6 +47,9 @@
/* This implementation is based on a freelist of pipes. */
+#define GRPC_MAX_CACHED_PIPES 50
+#define GRPC_PIPE_LOW_WATERMARK 25
+
typedef struct grpc_kick_pipe_info {
int pipe_read_fd;
int pipe_write_fd;
@@ -54,14 +57,16 @@ typedef struct grpc_kick_pipe_info {
} grpc_kick_pipe_info;
static grpc_kick_pipe_info *pipe_freelist = NULL;
+static int pipe_freelist_count = 0;
static gpr_mu pipe_freelist_mu;
-static grpc_kick_pipe_info *allocate_pipe() {
+static grpc_kick_pipe_info *allocate_pipe(void) {
grpc_kick_pipe_info *info;
gpr_mu_lock(&pipe_freelist_mu);
if (pipe_freelist != NULL) {
info = pipe_freelist;
pipe_freelist = pipe_freelist->next;
+ --pipe_freelist_count;
} else {
int pipefd[2];
/* TODO(klempner): Make this nonfatal */
@@ -77,11 +82,26 @@ static grpc_kick_pipe_info *allocate_pipe() {
return info;
}
+static void destroy_pipe(void) {
+ /* assumes pipe_freelist_mu is held */
+ grpc_kick_pipe_info *current = pipe_freelist;
+ pipe_freelist = pipe_freelist->next;
+ pipe_freelist_count--;
+ close(current->pipe_read_fd);
+ close(current->pipe_write_fd);
+ gpr_free(current);
+}
+
static void free_pipe(grpc_kick_pipe_info *pipe_info) {
- /* TODO(klempner): Start closing pipes if the free list gets too large */
gpr_mu_lock(&pipe_freelist_mu);
pipe_info->next = pipe_freelist;
pipe_freelist = pipe_info;
+ pipe_freelist_count++;
+ if (pipe_freelist_count > GRPC_MAX_CACHED_PIPES) {
+ while (pipe_freelist_count > GRPC_PIPE_LOW_WATERMARK) {
+ destroy_pipe();
+ }
+ }
gpr_mu_unlock(&pipe_freelist_mu);
}
@@ -92,11 +112,7 @@ void grpc_pollset_kick_global_init() {
void grpc_pollset_kick_global_destroy() {
while (pipe_freelist != NULL) {
- grpc_kick_pipe_info *current = pipe_freelist;
- pipe_freelist = pipe_freelist->next;
- close(current->pipe_read_fd);
- close(current->pipe_write_fd);
- gpr_free(current);
+ destroy_pipe();
}
gpr_mu_destroy(&pipe_freelist_mu);
}
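The change above bounds the kick-pipe freelist: once more than GRPC_MAX_CACHED_PIPES pipes are cached, free_pipe drains the list down to GRPC_PIPE_LOW_WATERMARK, so bursts are absorbed without hoarding file descriptors and the trim is not retriggered on every subsequent free. A generic sketch of the same high/low watermark policy (illustrative only, not gRPC code):

#include <cstddef>
#include <deque>

// Illustrative only: trim a freelist once it exceeds the high watermark,
// draining down to the low watermark so the next few frees stay under the cap.
template <typename T, typename Destroy>
void trim_freelist(std::deque<T> *freelist, size_t high_watermark,
                   size_t low_watermark, Destroy destroy) {
  if (freelist->size() <= high_watermark) return;
  while (freelist->size() > low_watermark) {
    destroy(freelist->back());  // e.g. close cached file descriptors
    freelist->pop_back();
  }
}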
diff --git a/src/core/iomgr/socket_utils_common_posix.c b/src/core/iomgr/socket_utils_common_posix.c
index 4b1ae3d4c7..3a0639f356 100644
--- a/src/core/iomgr/socket_utils_common_posix.c
+++ b/src/core/iomgr/socket_utils_common_posix.c
@@ -103,7 +103,7 @@ int grpc_set_socket_reuse_addr(int fd, int reuse) {
socklen_t intlen = sizeof(newval);
return 0 == setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val)) &&
0 == getsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &newval, &intlen) &&
- newval == val;
+ (newval != 0) == val;
}
/* disable nagle */
@@ -113,7 +113,7 @@ int grpc_set_socket_low_latency(int fd, int low_latency) {
socklen_t intlen = sizeof(newval);
return 0 == setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)) &&
0 == getsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &newval, &intlen) &&
- newval == val;
+ (newval != 0) == val;
}
static gpr_once g_probe_ipv6_once = GPR_ONCE_INIT;
diff --git a/src/core/iomgr/socket_utils_posix.c b/src/core/iomgr/socket_utils_posix.c
index e8c8071037..06c5033d45 100644
--- a/src/core/iomgr/socket_utils_posix.c
+++ b/src/core/iomgr/socket_utils_posix.c
@@ -50,12 +50,22 @@ int grpc_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen,
fd = accept(sockfd, addr, addrlen);
if (fd >= 0) {
- flags = fcntl(fd, F_GETFL, 0);
- flags |= nonblock ? O_NONBLOCK : 0;
- flags |= cloexec ? FD_CLOEXEC : 0;
- GPR_ASSERT(fcntl(fd, F_SETFL, flags) == 0);
+ if (nonblock) {
+ flags = fcntl(fd, F_GETFL, 0);
+ if (flags < 0) goto close_and_error;
+ if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) != 0) goto close_and_error;
+ }
+ if (cloexec) {
+ flags = fcntl(fd, F_GETFD, 0);
+ if (flags < 0) goto close_and_error;
+ if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) != 0) goto close_and_error;
+ }
}
return fd;
+
+close_and_error:
+ close(fd);
+ return -1;
}
#endif /* GPR_POSIX_SOCKETUTILS */
diff --git a/src/core/iomgr/tcp_server_posix.c b/src/core/iomgr/tcp_server_posix.c
index f1e5a6a8cd..2d6c6a73c2 100644
--- a/src/core/iomgr/tcp_server_posix.c
+++ b/src/core/iomgr/tcp_server_posix.c
@@ -256,7 +256,7 @@ static int add_socket_to_server(grpc_tcp_server *s, int fd,
if (s->nports == s->port_capacity) {
s->port_capacity *= 2;
s->ports =
- gpr_realloc(s->ports, sizeof(server_port *) * s->port_capacity);
+ gpr_realloc(s->ports, sizeof(server_port) * s->port_capacity);
}
sp = &s->ports[s->nports++];
sp->server = s;
diff --git a/src/core/security/credentials.c b/src/core/security/credentials.c
index 006d863e27..628963e46c 100644
--- a/src/core/security/credentials.c
+++ b/src/core/security/credentials.c
@@ -139,7 +139,7 @@ typedef struct {
typedef struct {
grpc_server_credentials base;
- grpc_ssl_config config;
+ grpc_ssl_server_config config;
} grpc_ssl_server_credentials;
static void ssl_destroy(grpc_credentials *creds) {
@@ -152,9 +152,24 @@ static void ssl_destroy(grpc_credentials *creds) {
static void ssl_server_destroy(grpc_server_credentials *creds) {
grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds;
+ size_t i;
+ for (i = 0; i < c->config.num_key_cert_pairs; i++) {
+ if (c->config.pem_private_keys[i] != NULL) {
+ gpr_free(c->config.pem_private_keys[i]);
+ }
+ if (c->config.pem_cert_chains[i] != NULL) {
+ gpr_free(c->config.pem_cert_chains[i]);
+ }
+ }
+ if (c->config.pem_private_keys != NULL) gpr_free(c->config.pem_private_keys);
+ if (c->config.pem_private_keys_sizes != NULL) {
+ gpr_free(c->config.pem_private_keys_sizes);
+ }
+ if (c->config.pem_cert_chains != NULL) gpr_free(c->config.pem_cert_chains);
+ if (c->config.pem_cert_chains_sizes != NULL) {
+ gpr_free(c->config.pem_cert_chains_sizes);
+ }
if (c->config.pem_root_certs != NULL) gpr_free(c->config.pem_root_certs);
- if (c->config.pem_private_key != NULL) gpr_free(c->config.pem_private_key);
- if (c->config.pem_cert_chain != NULL) gpr_free(c->config.pem_cert_chain);
gpr_free(creds);
}
@@ -179,7 +194,7 @@ const grpc_ssl_config *grpc_ssl_credentials_get_config(
}
}
-const grpc_ssl_config *grpc_ssl_server_credentials_get_config(
+const grpc_ssl_server_config *grpc_ssl_server_credentials_get_config(
const grpc_server_credentials *creds) {
if (creds == NULL || strcmp(creds->type, GRPC_CREDENTIALS_TYPE_SSL)) {
return NULL;
@@ -189,57 +204,89 @@ const grpc_ssl_config *grpc_ssl_server_credentials_get_config(
}
}
-static void ssl_build_config(const unsigned char *pem_root_certs,
- size_t pem_root_certs_size,
- const unsigned char *pem_private_key,
- size_t pem_private_key_size,
- const unsigned char *pem_cert_chain,
- size_t pem_cert_chain_size,
+static void ssl_copy_key_material(const char *input, unsigned char **output,
+ size_t *output_size) {
+ *output_size = strlen(input);
+ *output = gpr_malloc(*output_size);
+ memcpy(*output, input, *output_size);
+}
+
+static void ssl_build_config(const char *pem_root_certs,
+ grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
grpc_ssl_config *config) {
+ if (pem_root_certs == NULL) {
+ /* TODO(jboeuf): Get them from the environment. */
+ gpr_log(GPR_ERROR, "Default SSL roots not yet implemented.");
+ } else {
+ ssl_copy_key_material(pem_root_certs, &config->pem_root_certs,
+ &config->pem_root_certs_size);
+ }
+
+ if (pem_key_cert_pair != NULL) {
+ GPR_ASSERT(pem_key_cert_pair->private_key != NULL);
+ GPR_ASSERT(pem_key_cert_pair->cert_chain != NULL);
+ ssl_copy_key_material(pem_key_cert_pair->private_key,
+ &config->pem_private_key,
+ &config->pem_private_key_size);
+ ssl_copy_key_material(pem_key_cert_pair->cert_chain,
+ &config->pem_cert_chain,
+ &config->pem_cert_chain_size);
+ }
+}
+
+static void ssl_build_server_config(
+ const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+ size_t num_key_cert_pairs, grpc_ssl_server_config *config) {
+ size_t i;
if (pem_root_certs != NULL) {
- config->pem_root_certs = gpr_malloc(pem_root_certs_size);
- memcpy(config->pem_root_certs, pem_root_certs, pem_root_certs_size);
- config->pem_root_certs_size = pem_root_certs_size;
+ ssl_copy_key_material(pem_root_certs, &config->pem_root_certs,
+ &config->pem_root_certs_size);
}
- if (pem_private_key != NULL) {
- config->pem_private_key = gpr_malloc(pem_private_key_size);
- memcpy(config->pem_private_key, pem_private_key, pem_private_key_size);
- config->pem_private_key_size = pem_private_key_size;
+ if (num_key_cert_pairs > 0) {
+ GPR_ASSERT(pem_key_cert_pairs != NULL);
+ config->pem_private_keys =
+ gpr_malloc(num_key_cert_pairs * sizeof(unsigned char *));
+ config->pem_cert_chains =
+ gpr_malloc(num_key_cert_pairs * sizeof(unsigned char *));
+ config->pem_private_keys_sizes =
+ gpr_malloc(num_key_cert_pairs * sizeof(size_t));
+ config->pem_cert_chains_sizes =
+ gpr_malloc(num_key_cert_pairs * sizeof(size_t));
}
- if (pem_cert_chain != NULL) {
- config->pem_cert_chain = gpr_malloc(pem_cert_chain_size);
- memcpy(config->pem_cert_chain, pem_cert_chain, pem_cert_chain_size);
- config->pem_cert_chain_size = pem_cert_chain_size;
+ config->num_key_cert_pairs = num_key_cert_pairs;
+ for (i = 0; i < num_key_cert_pairs; i++) {
+ GPR_ASSERT(pem_key_cert_pairs[i].private_key != NULL);
+ GPR_ASSERT(pem_key_cert_pairs[i].cert_chain != NULL);
+ ssl_copy_key_material(pem_key_cert_pairs[i].private_key,
+ &config->pem_private_keys[i],
+ &config->pem_private_keys_sizes[i]);
+ ssl_copy_key_material(pem_key_cert_pairs[i].cert_chain,
+ &config->pem_cert_chains[i],
+ &config->pem_cert_chains_sizes[i]);
}
}
grpc_credentials *grpc_ssl_credentials_create(
- const unsigned char *pem_root_certs, size_t pem_root_certs_size,
- const unsigned char *pem_private_key, size_t pem_private_key_size,
- const unsigned char *pem_cert_chain, size_t pem_cert_chain_size) {
+ const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair) {
grpc_ssl_credentials *c = gpr_malloc(sizeof(grpc_ssl_credentials));
memset(c, 0, sizeof(grpc_ssl_credentials));
c->base.type = GRPC_CREDENTIALS_TYPE_SSL;
c->base.vtable = &ssl_vtable;
gpr_ref_init(&c->base.refcount, 1);
- ssl_build_config(pem_root_certs, pem_root_certs_size, pem_private_key,
- pem_private_key_size, pem_cert_chain, pem_cert_chain_size,
- &c->config);
+ ssl_build_config(pem_root_certs, pem_key_cert_pair, &c->config);
return &c->base;
}
grpc_server_credentials *grpc_ssl_server_credentials_create(
- const unsigned char *pem_root_certs, size_t pem_root_certs_size,
- const unsigned char *pem_private_key, size_t pem_private_key_size,
- const unsigned char *pem_cert_chain, size_t pem_cert_chain_size) {
+ const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+ size_t num_key_cert_pairs) {
grpc_ssl_server_credentials *c =
gpr_malloc(sizeof(grpc_ssl_server_credentials));
memset(c, 0, sizeof(grpc_ssl_server_credentials));
c->base.type = GRPC_CREDENTIALS_TYPE_SSL;
c->base.vtable = &ssl_server_vtable;
- ssl_build_config(pem_root_certs, pem_root_certs_size, pem_private_key,
- pem_private_key_size, pem_cert_chain, pem_cert_chain_size,
- &c->config);
+ ssl_build_server_config(pem_root_certs, pem_key_cert_pairs,
+ num_key_cert_pairs, &c->config);
return &c->base;
}
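Taken together, the credentials.c hunks above replace the raw buffer-plus-length parameters with NUL-terminated PEM strings and grpc_ssl_pem_key_cert_pair structs. A minimal caller-side sketch of the reworked API (not part of the patch; the helper names and PEM arguments are illustrative placeholders):

/* Sketch: creating SSL credentials through the reworked API. The PEM
 * strings are caller-supplied placeholders, not real key material. */
#include <stddef.h>
#include <grpc/grpc_security.h>

grpc_credentials *sketch_channel_creds(const char *pem_roots,
                                       const char *pem_key,
                                       const char *pem_chain) {
  grpc_ssl_pem_key_cert_pair pair;
  pair.private_key = pem_key;
  pair.cert_chain = pem_chain;
  /* Passing NULL for the pair requests server-authentication-only SSL. */
  return grpc_ssl_credentials_create(
      pem_roots, (pem_key != NULL && pem_chain != NULL) ? &pair : NULL);
}

grpc_server_credentials *sketch_server_creds(const char *pem_roots,
                                             const char *pem_key,
                                             const char *pem_chain) {
  /* The server variant now takes an array of pairs; a single pair here. */
  grpc_ssl_pem_key_cert_pair pair = {pem_key, pem_chain};
  return grpc_ssl_server_credentials_create(pem_roots, &pair, 1);
}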
diff --git a/src/core/security/credentials.h b/src/core/security/credentials.h
index 4a2532d7c4..8a9ff41e10 100644
--- a/src/core/security/credentials.h
+++ b/src/core/security/credentials.h
@@ -137,10 +137,17 @@ struct grpc_server_credentials {
const char *type;
};
-/* TODO(jboeuf): Have an ssl_server_config that can contain multiple key/cert
- pairs. */
+typedef struct {
+ unsigned char **pem_private_keys;
+ size_t *pem_private_keys_sizes;
+ unsigned char **pem_cert_chains;
+ size_t *pem_cert_chains_sizes;
+ size_t num_key_cert_pairs;
+ unsigned char *pem_root_certs;
+ size_t pem_root_certs_size;
+} grpc_ssl_server_config;
-const grpc_ssl_config *grpc_ssl_server_credentials_get_config(
+const grpc_ssl_server_config *grpc_ssl_server_credentials_get_config(
const grpc_server_credentials *ssl_creds);
#endif /* __GRPC_INTERNAL_SECURITY_CREDENTIALS_H__ */
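grpc_ssl_server_config keeps the per-pair key material in parallel arrays indexed by pair. A small sketch of how internal code could walk those arrays (illustrative only; the logging helper is an assumption, not part of this patch):

/* Sketch: iterating the parallel arrays of a grpc_ssl_server_config. */
#include <stddef.h>
#include <grpc/support/log.h>
#include "src/core/security/credentials.h"

static void log_server_config(const grpc_ssl_server_config *config) {
  size_t i;
  for (i = 0; i < config->num_key_cert_pairs; i++) {
    /* Entry i of each array belongs to the same key/cert pair; the *_sizes
     * arrays carry the byte lengths of the copied PEM blocks. */
    gpr_log(GPR_INFO, "pair %d: key=%d bytes, chain=%d bytes", (int)i,
            (int)config->pem_private_keys_sizes[i],
            (int)config->pem_cert_chains_sizes[i]);
  }
}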
diff --git a/src/core/security/security_context.c b/src/core/security/security_context.c
index 3a70f44a0a..cce3c7fe04 100644
--- a/src/core/security/security_context.c
+++ b/src/core/security/security_context.c
@@ -382,7 +382,7 @@ error:
}
grpc_security_status grpc_ssl_server_security_context_create(
- const grpc_ssl_config *config, grpc_security_context **ctx) {
+ const grpc_ssl_server_config *config, grpc_security_context **ctx) {
size_t num_alpn_protocols = grpc_chttp2_num_alpn_versions();
const unsigned char **alpn_protocol_strings =
gpr_malloc(sizeof(const char *) * num_alpn_protocols);
@@ -399,8 +399,7 @@ grpc_security_status grpc_ssl_server_security_context_create(
strlen(grpc_chttp2_get_alpn_version_index(i));
}
- if (config == NULL || config->pem_private_key == NULL ||
- config->pem_cert_chain == NULL) {
+ if (config == NULL || config->num_key_cert_pairs == 0) {
gpr_log(GPR_ERROR, "An SSL server needs a key and a cert.");
goto error;
}
@@ -410,13 +409,13 @@ grpc_security_status grpc_ssl_server_security_context_create(
gpr_ref_init(&c->base.refcount, 1);
c->base.vtable = &ssl_server_vtable;
result = tsi_create_ssl_server_handshaker_factory(
- (const unsigned char **)&config->pem_private_key,
- &config->pem_private_key_size,
- (const unsigned char **)&config->pem_cert_chain,
- &config->pem_cert_chain_size, 1, config->pem_root_certs,
- config->pem_root_certs_size, GRPC_SSL_CIPHER_SUITES,
- alpn_protocol_strings, alpn_protocol_string_lengths, num_alpn_protocols,
- &c->handshaker_factory);
+ (const unsigned char **)config->pem_private_keys,
+ config->pem_private_keys_sizes,
+ (const unsigned char **)config->pem_cert_chains,
+ config->pem_cert_chains_sizes, config->num_key_cert_pairs,
+ config->pem_root_certs, config->pem_root_certs_size,
+ GRPC_SSL_CIPHER_SUITES, alpn_protocol_strings,
+ alpn_protocol_string_lengths, num_alpn_protocols, &c->handshaker_factory);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
diff --git a/src/core/security/security_context.h b/src/core/security/security_context.h
index 9ace7f1ccb..2caa2d3690 100644
--- a/src/core/security/security_context.h
+++ b/src/core/security/security_context.h
@@ -157,7 +157,7 @@ grpc_security_status grpc_ssl_channel_security_context_create(
specific error code otherwise.
*/
grpc_security_status grpc_ssl_server_security_context_create(
- const grpc_ssl_config *config, grpc_security_context **ctx);
+ const grpc_ssl_server_config *config, grpc_security_context **ctx);
/* --- Creation of high level objects. --- */
diff --git a/src/core/security/server_secure_chttp2.c b/src/core/security/server_secure_chttp2.c
index 931fa95651..9dd4327822 100644
--- a/src/core/security/server_secure_chttp2.c
+++ b/src/core/security/server_secure_chttp2.c
@@ -93,6 +93,8 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr) {
grpc_tcp_server *tcp = NULL;
size_t i;
int count = 0;
+ int port_num = -1;
+ int port_temp;
resolved = grpc_blocking_resolve_address(addr, "https");
if (!resolved) {
@@ -105,9 +107,15 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr) {
}
for (i = 0; i < resolved->naddrs; i++) {
- if (grpc_tcp_server_add_port(tcp,
- (struct sockaddr *)&resolved->addrs[i].addr,
- resolved->addrs[i].len)) {
+ port_temp = grpc_tcp_server_add_port(
+ tcp, (struct sockaddr *)&resolved->addrs[i].addr,
+ resolved->addrs[i].len);
+ if (port_temp >= 0) {
+ if (port_num == -1) {
+ port_num = port_temp;
+ } else {
+ GPR_ASSERT(port_num == port_temp);
+ }
count++;
}
}
@@ -125,7 +133,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr) {
/* Register with the server only upon success */
grpc_server_add_listener(server, tcp, start, destroy);
- return 1;
+ return port_num;
/* Error path: cleanup and return */
error:
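With this change grpc_server_add_secure_http2_port reports the TCP port that was actually bound instead of a bare success flag, which makes wildcard binds ("host:0") usable. A hedged caller sketch (server creation and credentials setup elided; treating non-positive returns as failure is an assumption about the error path):

#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/log.h>

/* Sketch: bind a secure listener to an ephemeral port and report the port
 * the kernel picked. `server` is assumed to be a secure server instance. */
static int add_ephemeral_secure_port(grpc_server *server) {
  int bound_port = grpc_server_add_secure_http2_port(server, "0.0.0.0:0");
  if (bound_port > 0) {
    gpr_log(GPR_INFO, "secure listener bound to port %d", bound_port);
  } else {
    gpr_log(GPR_ERROR, "failed to add secure port");
  }
  return bound_port;
}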
diff --git a/src/core/support/time_posix.c b/src/core/support/time_posix.c
index 78d4c3b446..9e11f8a865 100644
--- a/src/core/support/time_posix.c
+++ b/src/core/support/time_posix.c
@@ -61,7 +61,7 @@ gpr_timespec gpr_now(void) {
struct timeval now_tv;
gettimeofday(&now_tv, NULL);
now.tv_sec = now_tv.tv_sec;
- now.tv_nsec = now_tv.tv_usec / 1000;
+ now.tv_nsec = now_tv.tv_usec * 1000;
return now;
}
#endif
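This hunk fixes the gettimeofday fallback in gpr_now, which was dividing microseconds by 1000 (yielding milliseconds) where tv_nsec requires nanoseconds. A standalone sanity check of the corrected conversion (a sketch independent of the gpr types):

#include <assert.h>
#include <sys/time.h>
#include <time.h>

int main(void) {
  struct timeval now_tv;
  struct timespec now;
  gettimeofday(&now_tv, NULL);
  now.tv_sec = now_tv.tv_sec;
  /* 1 microsecond == 1000 nanoseconds, so multiply rather than divide. */
  now.tv_nsec = now_tv.tv_usec * 1000;
  assert(now.tv_nsec >= 0 && now.tv_nsec < 1000000000L);
  return 0;
}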
diff --git a/src/cpp/client/credentials.cc b/src/cpp/client/credentials.cc
index 0955fa28ae..8e3a988477 100644
--- a/src/cpp/client/credentials.cc
+++ b/src/cpp/client/credentials.cc
@@ -54,26 +54,12 @@ std::unique_ptr<Credentials> CredentialsFactory::DefaultCredentials() {
// Builds SSL Credentials given SSL specific options
std::unique_ptr<Credentials> CredentialsFactory::SslCredentials(
const SslCredentialsOptions &options) {
- const unsigned char *pem_root_certs =
- options.pem_root_certs.empty() ? nullptr
- : reinterpret_cast<const unsigned char *>(
- options.pem_root_certs.c_str());
- if (pem_root_certs == nullptr) {
- return std::unique_ptr<Credentials>();
- }
- const unsigned char *pem_private_key =
- options.pem_private_key.empty() ? nullptr
- : reinterpret_cast<const unsigned char *>(
- options.pem_private_key.c_str());
- const unsigned char *pem_cert_chain =
- options.pem_cert_chain.empty() ? nullptr
- : reinterpret_cast<const unsigned char *>(
- options.pem_cert_chain.c_str());
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {
+ options.pem_private_key.c_str(), options.pem_cert_chain.c_str()};
grpc_credentials *c_creds = grpc_ssl_credentials_create(
- pem_root_certs, options.pem_root_certs.size(), pem_private_key,
- options.pem_private_key.size(), pem_cert_chain,
- options.pem_cert_chain.size());
+ options.pem_root_certs.empty() ? nullptr : options.pem_root_certs.c_str(),
+ options.pem_private_key.empty() ? nullptr : &pem_key_cert_pair);
std::unique_ptr<Credentials> cpp_creds(
c_creds == nullptr ? nullptr : new Credentials(c_creds));
return cpp_creds;
diff --git a/src/cpp/server/server_credentials.cc b/src/cpp/server/server_credentials.cc
index b82a2d821a..ce0271b6a0 100644
--- a/src/cpp/server/server_credentials.cc
+++ b/src/cpp/server/server_credentials.cc
@@ -48,23 +48,14 @@ grpc_server_credentials *ServerCredentials::GetRawCreds() { return creds_; }
std::shared_ptr<ServerCredentials> ServerCredentialsFactory::SslCredentials(
const SslServerCredentialsOptions &options) {
- const unsigned char *pem_root_certs =
- options.pem_root_certs.empty() ? nullptr
- : reinterpret_cast<const unsigned char *>(
- options.pem_root_certs.c_str());
- const unsigned char *pem_private_key =
- options.pem_private_key.empty() ? nullptr
- : reinterpret_cast<const unsigned char *>(
- options.pem_private_key.c_str());
- const unsigned char *pem_cert_chain =
- options.pem_cert_chain.empty() ? nullptr
- : reinterpret_cast<const unsigned char *>(
- options.pem_cert_chain.c_str());
-
+ std::vector<grpc_ssl_pem_key_cert_pair> pem_key_cert_pairs;
+ for (const auto &key_cert_pair : options.pem_key_cert_pairs) {
+ pem_key_cert_pairs.push_back(
+ {key_cert_pair.private_key.c_str(), key_cert_pair.cert_chain.c_str()});
+ }
grpc_server_credentials *c_creds = grpc_ssl_server_credentials_create(
- pem_root_certs, options.pem_root_certs.size(), pem_private_key,
- options.pem_private_key.size(), pem_cert_chain,
- options.pem_cert_chain.size());
+ options.pem_root_certs.empty() ? nullptr : options.pem_root_certs.c_str(),
+ &pem_key_cert_pairs[0], pem_key_cert_pairs.size());
return std::shared_ptr<ServerCredentials>(new ServerCredentials(c_creds));
}
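The C++ wrapper above now forwards any number of key/cert pairs to the core. The equivalent multi-pair setup at the C level looks roughly like this (a sketch; the two hypothetical pairs are only illustrative):

/* Sketch: server credentials carrying two key/cert pairs. */
#include <grpc/grpc_security.h>

grpc_server_credentials *sketch_multi_pair_creds(
    const char *pem_roots, const char *key_a, const char *chain_a,
    const char *key_b, const char *chain_b) {
  grpc_ssl_pem_key_cert_pair pairs[2];
  pairs[0].private_key = key_a;
  pairs[0].cert_chain = chain_a;
  pairs[1].private_key = key_b;
  pairs[1].cert_chain = chain_b;
  return grpc_ssl_server_credentials_create(pem_roots, pairs, 2);
}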
diff --git a/src/node/binding.gyp b/src/node/binding.gyp
index 4a1fd7aaf0..da4a943491 100644
--- a/src/node/binding.gyp
+++ b/src/node/binding.gyp
@@ -19,9 +19,6 @@
'link_settings': {
'libraries': [
'-lgrpc',
- '-levent',
- '-levent_pthreads',
- '-levent_core',
'-lrt',
'-lgpr',
'-lpthread'
diff --git a/src/node/client.js b/src/node/client.js
index edaa115d0f..f913b06f29 100644
--- a/src/node/client.js
+++ b/src/node/client.js
@@ -45,10 +45,22 @@ util.inherits(GrpcClientStream, Duplex);
* from stream.Duplex.
* @constructor
* @param {grpc.Call} call Call object to proxy
- * @param {object} options Stream options
+ * @param {function(*):Buffer=} serialize Serialization function for requests
+ * @param {function(Buffer):*=} deserialize Deserialization function for
+ * responses
*/
-function GrpcClientStream(call, options) {
- Duplex.call(this, options);
+function GrpcClientStream(call, serialize, deserialize) {
+ Duplex.call(this, {objectMode: true});
+ if (!serialize) {
+ serialize = function(value) {
+ return value;
+ };
+ }
+ if (!deserialize) {
+ deserialize = function(value) {
+ return value;
+ };
+ }
var self = this;
// Indicates that we can start reading and have not received a null read
var can_read = false;
@@ -59,6 +71,32 @@ function GrpcClientStream(call, options) {
// Indicates that a write is currently pending
var writing = false;
this._call = call;
+
+ /**
+ * Serialize a request value to a buffer. Always maps null to null. Otherwise
+ * uses the provided serialize function
+ * @param {*} value The value to serialize
+ * @return {Buffer} The serialized value
+ */
+ this.serialize = function(value) {
+ if (value === null || value === undefined) {
+ return null;
+ }
+ return serialize(value);
+ };
+
+ /**
+ * Deserialize a response buffer to a value. Always maps null to null.
+ * Otherwise uses the provided deserialize function.
+ * @param {Buffer} buffer The buffer to deserialize
+ * @return {*} The deserialized value
+ */
+ this.deserialize = function(buffer) {
+ if (buffer === null) {
+ return null;
+ }
+ return deserialize(buffer);
+ };
/**
* Callback to handle receiving a READ event. Pushes the data from that event
* onto the read queue and starts reading again if applicable.
@@ -66,7 +104,7 @@ function GrpcClientStream(call, options) {
*/
function readCallback(event) {
var data = event.data;
- if (self.push(data)) {
+ if (self.push(self.deserialize(data))) {
if (data == null) {
// Disable starting to read after null read was received
can_read = false;
@@ -102,7 +140,7 @@ function GrpcClientStream(call, options) {
next.callback();
writeNext();
};
- call.startWrite(next.chunk, writeCallback, 0);
+ call.startWrite(self.serialize(next.chunk), writeCallback, 0);
} else {
writing = false;
}
@@ -171,6 +209,9 @@ GrpcClientStream.prototype._write = function(chunk, encoding, callback) {
* Make a request on the channel to the given method with the given arguments
* @param {grpc.Channel} channel The channel on which to make the request
* @param {string} method The method to request
+ * @param {function(*):Buffer} serialize Serialization function for requests
+ * @param {function(Buffer):*} deserialize Deserialization function for
+ * responses
* @param {array=} metadata Array of metadata key/value pairs to add to the call
* @param {(number|Date)=} deadline The deadline for processing this request.
* Defaults to infinite future.
@@ -178,6 +219,8 @@ GrpcClientStream.prototype._write = function(chunk, encoding, callback) {
*/
function makeRequest(channel,
method,
+ serialize,
+ deserialize,
metadata,
deadline) {
if (deadline === undefined) {
diff --git a/src/node/common.js b/src/node/common.js
index 656a4aca95..54247e3fa1 100644
--- a/src/node/common.js
+++ b/src/node/common.js
@@ -31,6 +31,8 @@
*
*/
+var capitalize = require('underscore.string/capitalize');
+
/**
* Get a function that deserializes a specific type of protobuf.
* @param {function()} cls The constructor of the message type to deserialize
@@ -73,6 +75,9 @@ function fullyQualifiedName(value) {
return '';
}
var name = value.name;
+ if (value.className === 'Service.RPCMethod') {
+ name = capitalize(name);
+ }
if (value.hasOwnProperty('parent')) {
var parent_name = fullyQualifiedName(value.parent);
if (parent_name !== '') {
diff --git a/src/node/credentials.cc b/src/node/credentials.cc
index d58b7eda89..f9cd2fcfe0 100644
--- a/src/node/credentials.cc
+++ b/src/node/credentials.cc
@@ -136,33 +136,29 @@ NAN_METHOD(Credentials::CreateDefault) {
NAN_METHOD(Credentials::CreateSsl) {
NanScope();
- char *root_certs;
- char *private_key = NULL;
- char *cert_chain = NULL;
- int root_certs_length, private_key_length = 0, cert_chain_length = 0;
- if (!Buffer::HasInstance(args[0])) {
+ char *root_certs = NULL;
+ grpc_ssl_pem_key_cert_pair key_cert_pair = {NULL, NULL};
+ if (Buffer::HasInstance(args[0])) {
+ root_certs = Buffer::Data(args[0]);
+ } else if (!(args[0]->IsNull() || args[0]->IsUndefined())) {
return NanThrowTypeError("createSsl's first argument must be a Buffer");
}
- root_certs = Buffer::Data(args[0]);
- root_certs_length = Buffer::Length(args[0]);
if (Buffer::HasInstance(args[1])) {
- private_key = Buffer::Data(args[1]);
- private_key_length = Buffer::Length(args[1]);
+ key_cert_pair.private_key = Buffer::Data(args[1]);
} else if (!(args[1]->IsNull() || args[1]->IsUndefined())) {
return NanThrowTypeError(
"createSSl's second argument must be a Buffer if provided");
}
if (Buffer::HasInstance(args[2])) {
- cert_chain = Buffer::Data(args[2]);
- cert_chain_length = Buffer::Length(args[2]);
+ key_cert_pair.cert_chain = Buffer::Data(args[2]);
} else if (!(args[2]->IsNull() || args[2]->IsUndefined())) {
return NanThrowTypeError(
"createSSl's third argument must be a Buffer if provided");
}
+
NanReturnValue(WrapStruct(grpc_ssl_credentials_create(
- reinterpret_cast<unsigned char *>(root_certs), root_certs_length,
- reinterpret_cast<unsigned char *>(private_key), private_key_length,
- reinterpret_cast<unsigned char *>(cert_chain), cert_chain_length)));
+ root_certs,
+ key_cert_pair.private_key == NULL ? NULL : &key_cert_pair)));
}
NAN_METHOD(Credentials::CreateComposite) {
diff --git a/src/node/examples/math_server.js b/src/node/examples/math_server.js
index 366513dc17..d649b4fd6d 100644
--- a/src/node/examples/math_server.js
+++ b/src/node/examples/math_server.js
@@ -119,10 +119,10 @@ function mathDivMany(stream) {
var server = new Server({
'math.Math' : {
- Div: mathDiv,
- Fib: mathFib,
- Sum: mathSum,
- DivMany: mathDivMany
+ div: mathDiv,
+ fib: mathFib,
+ sum: mathSum,
+ divMany: mathDivMany
}
});
diff --git a/src/node/interop/empty.proto b/src/node/interop/empty.proto
new file mode 100644
index 0000000000..c9920a22ee
--- /dev/null
+++ b/src/node/interop/empty.proto
@@ -0,0 +1,19 @@
+syntax = "proto2";
+
+package grpc.testing;
+
+// An empty message that you can re-use to avoid defining duplicated empty
+// messages in your project. A typical example is to use it as argument or the
+// return value of a service API. For instance:
+//
+// service Foo {
+// rpc Bar (grpc.testing.Empty) returns (grpc.testing.Empty) { };
+// };
+//
+// MOE:begin_strip
+// The difference between this one and net/rpc/empty-message.proto is that
+// 1) The generated message here is in proto2 C++ API.
+// 2) The proto2.Empty has minimum dependencies
+// (no message_set or net/rpc dependencies)
+// MOE:end_strip
+message Empty {}
diff --git a/src/node/interop/interop_client.js b/src/node/interop/interop_client.js
new file mode 100644
index 0000000000..cf75b9a77a
--- /dev/null
+++ b/src/node/interop/interop_client.js
@@ -0,0 +1,274 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+var fs = require('fs');
+var path = require('path');
+var grpc = require('..');
+var testProto = grpc.load(__dirname + '/test.proto').grpc.testing;
+
+var assert = require('assert');
+
+/**
+ * Create a buffer filled with size zeroes
+ * @param {number} size The length of the buffer
+ * @return {Buffer} The new buffer
+ */
+function zeroBuffer(size) {
+ var zeros = new Buffer(size);
+ zeros.fill(0);
+ return zeros;
+}
+
+/**
+ * Run the empty_unary test
+ * @param {Client} client The client to test against
+ * @param {function} done Callback to call when the test is completed. Included
+ * primarily for use with mocha
+ */
+function emptyUnary(client, done) {
+ var call = client.emptyCall({}, function(err, resp) {
+ assert.ifError(err);
+ });
+ call.on('status', function(status) {
+ assert.strictEqual(status.code, grpc.status.OK);
+ if (done) {
+ done();
+ }
+ });
+}
+
+/**
+ * Run the large_unary test
+ * @param {Client} client The client to test against
+ * @param {function} done Callback to call when the test is completed. Included
+ * primarily for use with mocha
+ */
+function largeUnary(client, done) {
+ var arg = {
+ response_type: testProto.PayloadType.COMPRESSABLE,
+ response_size: 314159,
+ payload: {
+ body: zeroBuffer(271828)
+ }
+ };
+ var call = client.unaryCall(arg, function(err, resp) {
+ assert.ifError(err);
+ assert.strictEqual(resp.payload.type, testProto.PayloadType.COMPRESSABLE);
+ assert.strictEqual(resp.payload.body.limit - resp.payload.body.offset,
+ 314159);
+ });
+ call.on('status', function(status) {
+ assert.strictEqual(status.code, grpc.status.OK);
+ if (done) {
+ done();
+ }
+ });
+}
+
+/**
+ * Run the client_streaming test
+ * @param {Client} client The client to test against
+ * @param {function} done Callback to call when the test is completed. Included
+ * primarily for use with mocha
+ */
+function clientStreaming(client, done) {
+ var call = client.streamingInputCall(function(err, resp) {
+ assert.ifError(err);
+ assert.strictEqual(resp.aggregated_payload_size, 74922);
+ });
+ call.on('status', function(status) {
+ assert.strictEqual(status.code, grpc.status.OK);
+ if (done) {
+ done();
+ }
+ });
+ var payload_sizes = [27182, 8, 1828, 45904];
+ for (var i = 0; i < payload_sizes.length; i++) {
+ call.write({payload: {body: zeroBuffer(payload_sizes[i])}});
+ }
+ call.end();
+}
+
+/**
+ * Run the server_streaming test
+ * @param {Client} client The client to test against
+ * @param {function} done Callback to call when the test is completed. Included
+ * primarily for use with mocha
+ */
+function serverStreaming(client, done) {
+ var arg = {
+ response_type: testProto.PayloadType.COMPRESSABLE,
+ response_parameters: [
+ {size: 31415},
+ {size: 9},
+ {size: 2653},
+ {size: 58979}
+ ]
+ };
+ var call = client.streamingOutputCall(arg);
+ var resp_index = 0;
+ call.on('data', function(value) {
+ assert(resp_index < 4);
+ assert.strictEqual(value.payload.type, testProto.PayloadType.COMPRESSABLE);
+ assert.strictEqual(value.payload.body.limit - value.payload.body.offset,
+ arg.response_parameters[resp_index].size);
+ resp_index += 1;
+ });
+ call.on('status', function(status) {
+ assert.strictEqual(resp_index, 4);
+ assert.strictEqual(status.code, grpc.status.OK);
+ if (done) {
+ done();
+ }
+ });
+}
+
+/**
+ * Run the ping_pong test
+ * @param {Client} client The client to test against
+ * @param {function} done Callback to call when the test is completed. Included
+ * primarily for use with mocha
+ */
+function pingPong(client, done) {
+ var payload_sizes = [27182, 8, 1828, 45904];
+ var response_sizes = [31415, 9, 2653, 58979];
+ var call = client.fullDuplexCall();
+ call.on('status', function(status) {
+ assert.strictEqual(status.code, grpc.status.OK);
+ if (done) {
+ done();
+ }
+ });
+ var index = 0;
+ call.write({
+ response_type: testProto.PayloadType.COMPRESSABLE,
+ response_parameters: [
+ {size: response_sizes[index]}
+ ],
+ payload: {body: zeroBuffer(payload_sizes[index])}
+ });
+ call.on('data', function(response) {
+ assert.strictEqual(response.payload.type,
+ testProto.PayloadType.COMPRESSABLE);
+ assert.equal(response.payload.body.limit - response.payload.body.offset,
+ response_sizes[index]);
+ index += 1;
+ if (index == 4) {
+ call.end();
+ } else {
+ call.write({
+ response_type: testProto.PayloadType.COMPRESSABLE,
+ response_parameters: [
+ {size: response_sizes[index]}
+ ],
+ payload: {body: zeroBuffer(payload_sizes[index])}
+ });
+ }
+ });
+}
+
+/**
+ * Run the empty_stream test.
+ * NOTE: This does not work, but should with the new invoke API
+ * @param {Client} client The client to test against
+ * @param {function} done Callback to call when the test is completed. Included
+ * primarily for use with mocha
+ */
+function emptyStream(client, done) {
+ var call = client.fullDuplexCall();
+ call.on('status', function(status) {
+ assert.strictEqual(status.code, grpc.status.OK);
+ if (done) {
+ done();
+ }
+ });
+ call.on('data', function(value) {
+ assert.fail(value, null, 'No data should have been received', '!==');
+ });
+ call.end();
+}
+
+/**
+ * Map from test case names to test functions
+ */
+var test_cases = {
+ empty_unary: emptyUnary,
+ large_unary: largeUnary,
+ client_streaming: clientStreaming,
+ server_streaming: serverStreaming,
+ ping_pong: pingPong,
+ empty_stream: emptyStream
+};
+
+/**
+ * Execute a single test case.
+ * @param {string} address The address of the server to connect to, in the
+ * format "hostname:port"
+ * @param {string} host_override The hostname of the server to use as an SSL
+ * override
+ * @param {string} test_case The name of the test case to run
+ * @param {bool} tls Indicates that a secure channel should be used
+ * @param {function} done Callback to call when the test is completed. Included
+ * primarily for use with mocha
+ */
+function runTest(address, host_override, test_case, tls, done) {
+ // TODO(mlumish): enable TLS functionality
+ var options = {};
+ if (tls) {
+ var ca_path = path.join(__dirname, '../test/data/ca.pem');
+ var ca_data = fs.readFileSync(ca_path);
+ var creds = grpc.Credentials.createSsl(ca_data);
+ options.credentials = creds;
+ if (host_override) {
+ options['grpc.ssl_target_name_override'] = host_override;
+ }
+ }
+ var client = new testProto.TestService(address, options);
+
+ test_cases[test_case](client, done);
+}
+
+if (require.main === module) {
+ var parseArgs = require('minimist');
+ var argv = parseArgs(process.argv, {
+ string: ['server_host', 'server_host_override', 'server_port', 'test_case',
+ 'use_tls', 'use_test_ca']
+ });
+ runTest(argv.server_host + ':' + argv.server_port, argv.server_host_override,
+ argv.test_case, argv.use_tls === 'true');
+}
+
+/**
+ * See docs for runTest
+ */
+exports.runTest = runTest;
diff --git a/src/node/interop/interop_server.js b/src/node/interop/interop_server.js
new file mode 100644
index 0000000000..6d2bd7ae0d
--- /dev/null
+++ b/src/node/interop/interop_server.js
@@ -0,0 +1,203 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+var fs = require('fs');
+var path = require('path');
+var _ = require('underscore');
+var grpc = require('..');
+var testProto = grpc.load(__dirname + '/test.proto').grpc.testing;
+var Server = grpc.buildServer([testProto.TestService.service]);
+
+/**
+ * Create a buffer filled with size zeroes
+ * @param {number} size The length of the buffer
+ * @return {Buffer} The new buffer
+ */
+function zeroBuffer(size) {
+ var zeros = new Buffer(size);
+ zeros.fill(0);
+ return zeros;
+}
+
+/**
+ * Respond to an empty parameter with an empty response.
+ * NOTE: this currently does not work due to issue #137
+ * @param {Call} call Call to handle
+ * @param {function(Error, Object)} callback Callback to call with result
+ * or error
+ */
+function handleEmpty(call, callback) {
+ callback(null, {});
+}
+
+/**
+ * Handle a unary request by sending the requested payload
+ * @param {Call} call Call to handle
+ * @param {function(Error, Object)} callback Callback to call with result or
+ * error
+ */
+function handleUnary(call, callback) {
+ var req = call.request;
+ var zeros = zeroBuffer(req.response_size);
+ var payload_type = req.response_type;
+ if (payload_type === testProto.PayloadType.RANDOM) {
+ payload_type = [
+ testProto.PayloadType.COMPRESSABLE,
+ testProto.PayloadType.UNCOMPRESSABLE][Math.random() < 0.5 ? 0 : 1];
+ }
+ callback(null, {payload: {type: payload_type, body: zeros}});
+}
+
+/**
+ * Respond to a streaming call with the total size of all payloads
+ * @param {Call} call Call to handle
+ * @param {function(Error, Object)} callback Callback to call with result or
+ * error
+ */
+function handleStreamingInput(call, callback) {
+ var aggregate_size = 0;
+ call.on('data', function(value) {
+ aggregate_size += value.payload.body.limit - value.payload.body.offset;
+ });
+ call.on('end', function() {
+ callback(null, {aggregated_payload_size: aggregate_size});
+ });
+}
+
+/**
+ * Respond to a payload request with a stream of the requested payloads
+ * @param {Call} call Call to handle
+ */
+function handleStreamingOutput(call) {
+ var req = call.request;
+ var payload_type = req.response_type;
+ if (payload_type === testProto.PayloadType.RANDOM) {
+ payload_type = [
+ testProto.PayloadType.COMPRESSABLE,
+ testProto.PayloadType.UNCOMPRESSABLE][Math.random() < 0.5 ? 0 : 1];
+ }
+ _.each(req.response_parameters, function(resp_param) {
+ call.write({
+ payload: {
+ body: zeroBuffer(resp_param.size),
+ type: payload_type
+ }
+ });
+ });
+ call.end();
+}
+
+/**
+ * Respond to a stream of payload requests with a stream of payload responses as
+ * they arrive.
+ * @param {Call} call Call to handle
+ */
+function handleFullDuplex(call) {
+ call.on('data', function(value) {
+ var payload_type = value.response_type;
+ if (payload_type === testProto.PayloadType.RANDOM) {
+ payload_type = [
+ testProto.PayloadType.COMPRESSABLE,
+ testProto.PayloadType.UNCOMPRESSABLE][Math.random() < 0.5 ? 0 : 1];
+ }
+ _.each(value.response_parameters, function(resp_param) {
+ call.write({
+ payload: {
+ body: zeroBuffer(resp_param.size),
+ type: payload_type
+ }
+ });
+ });
+ });
+ call.on('end', function() {
+ call.end();
+ });
+}
+
+/**
+ * Respond to a stream of payload requests with a stream of payload responses
+ * after all requests have arrived
+ * @param {Call} call Call to handle
+ */
+function handleHalfDuplex(call) {
+ throw new Error('HalfDuplexCall not yet implemented');
+}
+
+/**
+ * Get a server object bound to the given port
+ * @param {string} port Port to which to bind
+ * @param {boolean} tls Indicates that the bound port should use TLS
+ * @return {{server: Server, port: number}} Server object bound to the given
+ * port, and the port number that the server is bound to
+ */
+function getServer(port, tls) {
+ // TODO(mlumish): enable TLS functionality
+ var options = {};
+ if (tls) {
+ var key_path = path.join(__dirname, '../test/data/server1.key');
+ var pem_path = path.join(__dirname, '../test/data/server1.pem');
+
+ var key_data = fs.readFileSync(key_path);
+ var pem_data = fs.readFileSync(pem_path);
+ var server_creds = grpc.ServerCredentials.createSsl(null,
+ key_data,
+ pem_data);
+ options.credentials = server_creds;
+ }
+ var server = new Server({
+ 'grpc.testing.TestService' : {
+ emptyCall: handleEmpty,
+ unaryCall: handleUnary,
+ streamingOutputCall: handleStreamingOutput,
+ streamingInputCall: handleStreamingInput,
+ fullDuplexCall: handleFullDuplex,
+ halfDuplexCall: handleHalfDuplex
+ }
+ }, options);
+ var port_num = server.bind('0.0.0.0:' + port, tls);
+ return {server: server, port: port_num};
+}
+
+if (require.main === module) {
+ var parseArgs = require('minimist');
+ var argv = parseArgs(process.argv, {
+ string: ['port', 'use_tls']
+ });
+ var server_obj = getServer(argv.port, argv.use_tls === 'true');
+ server_obj.server.start();
+}
+
+/**
+ * See docs for getServer
+ */
+exports.getServer = getServer;
diff --git a/src/node/interop/messages.proto b/src/node/interop/messages.proto
new file mode 100644
index 0000000000..29db0dd8b1
--- /dev/null
+++ b/src/node/interop/messages.proto
@@ -0,0 +1,94 @@
+// Message definitions to be used by integration test service definitions.
+
+syntax = "proto2";
+
+package grpc.testing;
+
+// The type of payload that should be returned.
+enum PayloadType {
+ // Compressable text format.
+ COMPRESSABLE = 0;
+
+ // Uncompressable binary format.
+ UNCOMPRESSABLE = 1;
+
+ // Randomly chosen from all other formats defined in this enum.
+ RANDOM = 2;
+}
+
+// A block of data, to simply increase gRPC message size.
+message Payload {
+ // The type of data in body.
+ optional PayloadType type = 1;
+ // Primary contents of payload.
+ optional bytes body = 2;
+}
+
+// Unary request.
+message SimpleRequest {
+ // Desired payload type in the response from the server.
+ // If response_type is RANDOM, server randomly chooses one from other formats.
+ optional PayloadType response_type = 1;
+
+ // Desired payload size in the response from the server.
+ // If response_type is COMPRESSABLE, this denotes the size before compression.
+ optional int32 response_size = 2;
+
+ // Optional input payload sent along with the request.
+ optional Payload payload = 3;
+}
+
+// Unary response, as configured by the request.
+message SimpleResponse {
+ // Payload to increase message size.
+ optional Payload payload = 1;
+ // The user the request came from, for verifying authentication was
+ // successful when the client expected it.
+ optional int64 effective_gaia_user_id = 2;
+}
+
+// Client-streaming request.
+message StreamingInputCallRequest {
+ // Optional input payload sent along with the request.
+ optional Payload payload = 1;
+
+ // Not expecting any payload from the response.
+}
+
+// Client-streaming response.
+message StreamingInputCallResponse {
+ // Aggregated size of payloads received from the client.
+ optional int32 aggregated_payload_size = 1;
+}
+
+// Configuration for a particular response.
+message ResponseParameters {
+ // Desired payload sizes in responses from the server.
+ // If response_type is COMPRESSABLE, this denotes the size before compression.
+ optional int32 size = 1;
+
+ // Desired interval between consecutive responses in the response stream in
+ // microseconds.
+ optional int32 interval_us = 2;
+}
+
+// Server-streaming request.
+message StreamingOutputCallRequest {
+ // Desired payload type in the response from the server.
+ // If response_type is RANDOM, the payload from each response in the stream
+ // might be of different types. This is to simulate a mixed type of payload
+ // stream.
+ optional PayloadType response_type = 1;
+
+ // Configuration for each expected response message.
+ repeated ResponseParameters response_parameters = 2;
+
+ // Optional input payload sent along with the request.
+ optional Payload payload = 3;
+}
+
+// Server-streaming response, as configured by the request and parameters.
+message StreamingOutputCallResponse {
+ // Payload to increase response size.
+ optional Payload payload = 1;
+}
diff --git a/src/node/interop/test.proto b/src/node/interop/test.proto
new file mode 100644
index 0000000000..8380ebb31d
--- /dev/null
+++ b/src/node/interop/test.proto
@@ -0,0 +1,42 @@
+// An integration test service that covers all the method signature permutations
+// of unary/streaming requests/responses.
+syntax = "proto2";
+
+import "empty.proto";
+import "messages.proto";
+
+package grpc.testing;
+
+// A simple service to test the various types of RPCs and experiment with
+// performance with various types of payload.
+service TestService {
+ // One empty request followed by one empty response.
+ rpc EmptyCall(grpc.testing.Empty) returns (grpc.testing.Empty);
+
+ // One request followed by one response.
+ // The server returns the client payload as-is.
+ rpc UnaryCall(SimpleRequest) returns (SimpleResponse);
+
+ // One request followed by a sequence of responses (streamed download).
+ // The server returns the payload with client desired type and sizes.
+ rpc StreamingOutputCall(StreamingOutputCallRequest)
+ returns (stream StreamingOutputCallResponse);
+
+ // A sequence of requests followed by one response (streamed upload).
+ // The server returns the aggregated size of client payload as the result.
+ rpc StreamingInputCall(stream StreamingInputCallRequest)
+ returns (StreamingInputCallResponse);
+
+ // A sequence of requests with each request served by the server immediately.
+ // As one request could lead to multiple responses, this interface
+ // demonstrates the idea of full duplexing.
+ rpc FullDuplexCall(stream StreamingOutputCallRequest)
+ returns (stream StreamingOutputCallResponse);
+
+ // A sequence of requests followed by a sequence of responses.
+ // The server buffers all the client requests and then serves them in order. A
+ // stream of responses is returned to the client once the server starts
+ // processing the first request.
+ rpc HalfDuplexCall(stream StreamingOutputCallRequest)
+ returns (stream StreamingOutputCallResponse);
+}
diff --git a/src/node/main.js b/src/node/main.js
index a8dfa20024..751c3525d3 100644
--- a/src/node/main.js
+++ b/src/node/main.js
@@ -55,7 +55,7 @@ function loadObject(value) {
return result;
} else if (value.className === 'Service') {
return surface_client.makeClientConstructor(value);
- } else if (value.className === 'Service.Message') {
+ } else if (value.className === 'Message' || value.className === 'Enum') {
return value.build();
} else {
return value;
@@ -96,3 +96,13 @@ exports.status = grpc.status;
* Call error name to code number mapping
*/
exports.callError = grpc.callError;
+
+/**
+ * Credentials factories
+ */
+exports.Credentials = grpc.Credentials;
+
+/**
+ * ServerCredentials factories
+ */
+exports.ServerCredentials = grpc.ServerCredentials;
diff --git a/src/node/package.json b/src/node/package.json
index ed93c4ff41..5f3c6fa345 100644
--- a/src/node/package.json
+++ b/src/node/package.json
@@ -8,12 +8,14 @@
"dependencies": {
"bindings": "^1.2.1",
"nan": "~1.3.0",
+ "protobufjs": "murgatroid99/ProtoBuf.js",
"underscore": "^1.7.0",
- "protobufjs": "murgatroid99/ProtoBuf.js"
+ "underscore.string": "^3.0.0"
},
"devDependencies": {
+ "highland": "~2.2.0",
"mocha": "~1.21.0",
- "highland": "~2.0.0"
+ "minimist": "^1.1.0"
},
"main": "main.js"
}
diff --git a/src/node/server.cc b/src/node/server.cc
index 64826897cd..b102775d33 100644
--- a/src/node/server.cc
+++ b/src/node/server.cc
@@ -194,7 +194,7 @@ NAN_METHOD(Server::AddHttp2Port) {
return NanThrowTypeError("addHttp2Port's argument must be a String");
}
Server *server = ObjectWrap::Unwrap<Server>(args.This());
- NanReturnValue(NanNew<Boolean>(grpc_server_add_http2_port(
+ NanReturnValue(NanNew<Number>(grpc_server_add_http2_port(
server->wrapped_server, *NanUtf8String(args[0]))));
}
@@ -208,7 +208,7 @@ NAN_METHOD(Server::AddSecureHttp2Port) {
return NanThrowTypeError("addSecureHttp2Port's argument must be a String");
}
Server *server = ObjectWrap::Unwrap<Server>(args.This());
- NanReturnValue(NanNew<Boolean>(grpc_server_add_secure_http2_port(
+ NanReturnValue(NanNew<Number>(grpc_server_add_secure_http2_port(
server->wrapped_server, *NanUtf8String(args[0]))));
}
diff --git a/src/node/server.js b/src/node/server.js
index e947032b29..eca20aa5fd 100644
--- a/src/node/server.js
+++ b/src/node/server.js
@@ -47,10 +47,22 @@ util.inherits(GrpcServerStream, Duplex);
* from stream.Duplex.
* @constructor
* @param {grpc.Call} call Call object to proxy
- * @param {object} options Stream options
+ * @param {function(*):Buffer=} serialize Serialization function for responses
+ * @param {function(Buffer):*=} deserialize Deserialization function for
+ * requests
*/
-function GrpcServerStream(call, options) {
- Duplex.call(this, options);
+function GrpcServerStream(call, serialize, deserialize) {
+ Duplex.call(this, {objectMode: true});
+ if (!serialize) {
+ serialize = function(value) {
+ return value;
+ };
+ }
+ if (!deserialize) {
+ deserialize = function(value) {
+ return value;
+ };
+ }
this._call = call;
// Indicate that a status has been sent
var finished = false;
@@ -59,6 +71,33 @@ function GrpcServerStream(call, options) {
'code' : grpc.status.OK,
'details' : 'OK'
};
+
+ /**
+ * Serialize a response value to a buffer. Always maps null to null. Otherwise
+ * uses the provided serialize function
+ * @param {*} value The value to serialize
+ * @return {Buffer} The serialized value
+ */
+ this.serialize = function(value) {
+ if (value === null || value === undefined) {
+ return null;
+ }
+ return serialize(value);
+ };
+
+ /**
+ * Deserialize a request buffer to a value. Always maps null to null.
+ * Otherwise uses the provided deserialize function.
+ * @param {Buffer} buffer The buffer to deserialize
+ * @return {*} The deserialized value
+ */
+ this.deserialize = function(buffer) {
+ if (buffer === null) {
+ return null;
+ }
+ return deserialize(buffer);
+ };
+
/**
* Send the pending status
*/
@@ -75,7 +114,6 @@ function GrpcServerStream(call, options) {
* @param {Error} err The error object
*/
function setStatus(err) {
- console.log('Server setting status to', err);
var code = grpc.status.INTERNAL;
var details = 'Unknown Error';
@@ -113,7 +151,7 @@ function GrpcServerStream(call, options) {
return;
}
var data = event.data;
- if (self.push(data) && data != null) {
+ if (self.push(deserialize(data)) && data != null) {
self._call.startRead(readCallback);
} else {
reading = false;
@@ -155,7 +193,7 @@ GrpcServerStream.prototype._read = function(size) {
*/
GrpcServerStream.prototype._write = function(chunk, encoding, callback) {
var self = this;
- self._call.startWrite(chunk, function(event) {
+ self._call.startWrite(self.serialize(chunk), function(event) {
callback();
}, 0);
};
@@ -211,12 +249,13 @@ function Server(options) {
}
}, 0);
call.serverEndInitialMetadata(0);
- var stream = new GrpcServerStream(call);
+ var stream = new GrpcServerStream(call, handler.serialize,
+ handler.deserialize);
Object.defineProperty(stream, 'cancelled', {
get: function() { return cancelled;}
});
try {
- handler(stream, data.metadata);
+ handler.func(stream, data.metadata);
} catch (e) {
stream.emit('error', e);
}
@@ -237,14 +276,20 @@ function Server(options) {
* handle/respond to.
* @param {function} handler Function that takes a stream of request values and
* returns a stream of response values
+ * @param {function(*):Buffer} serialize Serialization function for responses
+ * @param {function(Buffer):*} deserialize Deserialization function for requests
* @return {boolean} True if the handler was set. False if a handler was already
* set for that name.
*/
-Server.prototype.register = function(name, handler) {
+Server.prototype.register = function(name, handler, serialize, deserialize) {
if (this.handlers.hasOwnProperty(name)) {
return false;
}
- this.handlers[name] = handler;
+ this.handlers[name] = {
+ func: handler,
+ serialize: serialize,
+ deserialize: deserialize
+ };
return true;
};
@@ -256,9 +301,9 @@ Server.prototype.register = function(name, handler) {
*/
Server.prototype.bind = function(port, secure) {
if (secure) {
- this._server.addSecureHttp2Port(port);
+ return this._server.addSecureHttp2Port(port);
} else {
- this._server.addHttp2Port(port);
+ return this._server.addHttp2Port(port);
}
};
diff --git a/src/node/server_credentials.cc b/src/node/server_credentials.cc
index 38df547527..393f3a6305 100644
--- a/src/node/server_credentials.cc
+++ b/src/node/server_credentials.cc
@@ -123,14 +123,12 @@ NAN_METHOD(ServerCredentials::New) {
}
NAN_METHOD(ServerCredentials::CreateSsl) {
+ // TODO: have the node API support multiple key/cert pairs.
NanScope();
char *root_certs = NULL;
- char *private_key;
- char *cert_chain;
- int root_certs_length = 0, private_key_length, cert_chain_length;
+ grpc_ssl_pem_key_cert_pair key_cert_pair;
if (Buffer::HasInstance(args[0])) {
root_certs = Buffer::Data(args[0]);
- root_certs_length = Buffer::Length(args[0]);
} else if (!(args[0]->IsNull() || args[0]->IsUndefined())) {
return NanThrowTypeError(
"createSSl's first argument must be a Buffer if provided");
@@ -138,17 +136,13 @@ NAN_METHOD(ServerCredentials::CreateSsl) {
if (!Buffer::HasInstance(args[1])) {
return NanThrowTypeError("createSsl's second argument must be a Buffer");
}
- private_key = Buffer::Data(args[1]);
- private_key_length = Buffer::Length(args[1]);
+ key_cert_pair.private_key = Buffer::Data(args[1]);
if (!Buffer::HasInstance(args[2])) {
return NanThrowTypeError("createSsl's third argument must be a Buffer");
}
- cert_chain = Buffer::Data(args[2]);
- cert_chain_length = Buffer::Length(args[2]);
- NanReturnValue(WrapStruct(grpc_ssl_server_credentials_create(
- reinterpret_cast<unsigned char *>(root_certs), root_certs_length,
- reinterpret_cast<unsigned char *>(private_key), private_key_length,
- reinterpret_cast<unsigned char *>(cert_chain), cert_chain_length)));
+ key_cert_pair.cert_chain = Buffer::Data(args[2]);
+ NanReturnValue(WrapStruct(
+ grpc_ssl_server_credentials_create(root_certs, &key_cert_pair, 1)));
}
NAN_METHOD(ServerCredentials::CreateFake) {
diff --git a/src/node/surface_client.js b/src/node/surface_client.js
index 77dab5ca6f..996e3d101f 100644
--- a/src/node/surface_client.js
+++ b/src/node/surface_client.js
@@ -33,6 +33,9 @@
var _ = require('underscore');
+var capitalize = require('underscore.string/capitalize');
+var decapitalize = require('underscore.string/decapitalize');
+
var client = require('./client.js');
var common = require('./common.js');
@@ -352,10 +355,11 @@ function makeClientConstructor(service) {
method_type = 'unary';
}
}
- SurfaceClient.prototype[method.name] = requester_makers[method_type](
- prefix + method.name,
- common.serializeCls(method.resolvedRequestType.build()),
- common.deserializeCls(method.resolvedResponseType.build()));
+ SurfaceClient.prototype[decapitalize(method.name)] =
+ requester_makers[method_type](
+ prefix + capitalize(method.name),
+ common.serializeCls(method.resolvedRequestType.build()),
+ common.deserializeCls(method.resolvedResponseType.build()));
});
SurfaceClient.service = service;
diff --git a/src/node/surface_server.js b/src/node/surface_server.js
index b6e0c37b4c..bc688839fe 100644
--- a/src/node/surface_server.js
+++ b/src/node/surface_server.js
@@ -33,6 +33,9 @@
var _ = require('underscore');
+var capitalize = require('underscore.string/capitalize');
+var decapitalize = require('underscore.string/decapitalize');
+
var Server = require('./server.js');
var stream = require('stream');
@@ -332,15 +335,16 @@ function makeServerConstructor(services) {
method_type = 'unary';
}
}
- if (service_handlers[service_name][method.name] === undefined) {
+ if (service_handlers[service_name][decapitalize(method.name)] ===
+ undefined) {
throw new Error('Method handler for ' +
common.fullyQualifiedName(method) + ' not provided.');
}
var binary_handler = handler_makers[method_type](
- service_handlers[service_name][method.name],
+ service_handlers[service_name][decapitalize(method.name)],
common.serializeCls(method.resolvedResponseType.build()),
common.deserializeCls(method.resolvedRequestType.build()));
- server.register(prefix + method.name, binary_handler);
+ server.register(prefix + capitalize(method.name), binary_handler);
});
}, this);
}
@@ -353,8 +357,7 @@ function makeServerConstructor(services) {
* @return {SurfaceServer} this
*/
SurfaceServer.prototype.bind = function(port, secure) {
- this.inner_server.bind(port, secure);
- return this;
+ return this.inner_server.bind(port, secure);
};
/**
diff --git a/src/node/test/client_server_test.js b/src/node/test/client_server_test.js
index 534a5c464f..2a25908684 100644
--- a/src/node/test/client_server_test.js
+++ b/src/node/test/client_server_test.js
@@ -37,7 +37,6 @@ var path = require('path');
var grpc = require('bindings')('grpc.node');
var Server = require('../server');
var client = require('../client');
-var port_picker = require('../port_picker');
var common = require('../common');
var _ = require('highland');
@@ -80,55 +79,50 @@ function errorHandler(stream) {
describe('echo client', function() {
it('should receive echo responses', function(done) {
- port_picker.nextAvailablePort(function(port) {
- var server = new Server();
- server.bind(port);
- server.register('echo', echoHandler);
- server.start();
-
- var messages = ['echo1', 'echo2', 'echo3', 'echo4'];
- var channel = new grpc.Channel(port);
- var stream = client.makeRequest(
- channel,
- 'echo');
- _(messages).map(function(val) {
- return new Buffer(val);
- }).pipe(stream);
- var index = 0;
- stream.on('data', function(chunk) {
- assert.equal(messages[index], chunk.toString());
- index += 1;
- });
- stream.on('end', function() {
- server.shutdown();
- done();
- });
+ var server = new Server();
+ var port_num = server.bind('0.0.0.0:0');
+ server.register('echo', echoHandler);
+ server.start();
+
+ var messages = ['echo1', 'echo2', 'echo3', 'echo4'];
+ var channel = new grpc.Channel('localhost:' + port_num);
+ var stream = client.makeRequest(
+ channel,
+ 'echo');
+ _(messages).map(function(val) {
+ return new Buffer(val);
+ }).pipe(stream);
+ var index = 0;
+ stream.on('data', function(chunk) {
+ assert.equal(messages[index], chunk.toString());
+ index += 1;
+ });
+ stream.on('end', function() {
+ server.shutdown();
+ done();
});
});
it('should get an error status that the server throws', function(done) {
- port_picker.nextAvailablePort(function(port) {
- var server = new Server();
- server.bind(port);
- server.register('error', errorHandler);
- server.start();
-
- var channel = new grpc.Channel(port);
- var stream = client.makeRequest(
- channel,
- 'error',
- null,
- getDeadline(1));
-
- stream.on('data', function() {});
- stream.write(new Buffer('test'));
- stream.end();
- stream.on('status', function(status) {
- assert.equal(status.code, grpc.status.UNIMPLEMENTED);
- assert.equal(status.details, 'error details');
- server.shutdown();
- done();
- });
-
+ var server = new Server();
+ var port_num = server.bind('0.0.0.0:0');
+ server.register('error', errorHandler);
+ server.start();
+
+ var channel = new grpc.Channel('localhost:' + port_num);
+ var stream = client.makeRequest(
+ channel,
+ 'error',
+ null,
+ getDeadline(1));
+
+ stream.on('data', function() {});
+ stream.write(new Buffer('test'));
+ stream.end();
+ stream.on('status', function(status) {
+ assert.equal(status.code, grpc.status.UNIMPLEMENTED);
+ assert.equal(status.details, 'error details');
+ server.shutdown();
+ done();
});
});
});
@@ -136,46 +130,43 @@ describe('echo client', function() {
* and the insecure echo client test */
describe('secure echo client', function() {
it('should recieve echo responses', function(done) {
- port_picker.nextAvailablePort(function(port) {
- fs.readFile(ca_path, function(err, ca_data) {
+ fs.readFile(ca_path, function(err, ca_data) {
+ assert.ifError(err);
+ fs.readFile(key_path, function(err, key_data) {
assert.ifError(err);
- fs.readFile(key_path, function(err, key_data) {
+ fs.readFile(pem_path, function(err, pem_data) {
assert.ifError(err);
- fs.readFile(pem_path, function(err, pem_data) {
- assert.ifError(err);
- var creds = grpc.Credentials.createSsl(ca_data);
- var server_creds = grpc.ServerCredentials.createSsl(null,
- key_data,
- pem_data);
-
- var server = new Server({'credentials' : server_creds});
- server.bind(port, true);
- server.register('echo', echoHandler);
- server.start();
-
- var messages = ['echo1', 'echo2', 'echo3', 'echo4'];
- var channel = new grpc.Channel(port, {
- 'grpc.ssl_target_name_override' : 'foo.test.google.com',
- 'credentials' : creds
- });
- var stream = client.makeRequest(
- channel,
- 'echo');
-
- _(messages).map(function(val) {
- return new Buffer(val);
- }).pipe(stream);
- var index = 0;
- stream.on('data', function(chunk) {
- assert.equal(messages[index], chunk.toString());
- index += 1;
- });
- stream.on('end', function() {
- server.shutdown();
- done();
- });
+ var creds = grpc.Credentials.createSsl(ca_data);
+ var server_creds = grpc.ServerCredentials.createSsl(null,
+ key_data,
+ pem_data);
+
+ var server = new Server({'credentials' : server_creds});
+ var port_num = server.bind('0.0.0.0:0', true);
+ server.register('echo', echoHandler);
+ server.start();
+
+ var messages = ['echo1', 'echo2', 'echo3', 'echo4'];
+ var channel = new grpc.Channel('localhost:' + port_num, {
+ 'grpc.ssl_target_name_override' : 'foo.test.google.com',
+ 'credentials' : creds
+ });
+ var stream = client.makeRequest(
+ channel,
+ 'echo');
+
+ _(messages).map(function(val) {
+ return new Buffer(val);
+ }).pipe(stream);
+ var index = 0;
+ stream.on('data', function(chunk) {
+ assert.equal(messages[index], chunk.toString());
+ index += 1;
+ });
+ stream.on('end', function() {
+ server.shutdown();
+ done();
});
-
});
});
});
diff --git a/src/node/test/end_to_end_test.js b/src/node/test/end_to_end_test.js
index 40bb5f3bbd..db3834dbba 100644
--- a/src/node/test/end_to_end_test.js
+++ b/src/node/test/end_to_end_test.js
@@ -33,7 +33,6 @@
var assert = require('assert');
var grpc = require('bindings')('grpc.node');
-var port_picker = require('../port_picker');
/**
* This is used for testing functions with multiple asynchronous calls that
@@ -58,143 +57,139 @@ function multiDone(done, count) {
describe('end-to-end', function() {
it('should start and end a request without error', function(complete) {
- port_picker.nextAvailablePort(function(port) {
- var server = new grpc.Server();
- var done = multiDone(function() {
- complete();
- server.shutdown();
- }, 2);
- server.addHttp2Port(port);
- var channel = new grpc.Channel(port);
- var deadline = new Date();
- deadline.setSeconds(deadline.getSeconds() + 3);
- var status_text = 'xyz';
- var call = new grpc.Call(channel,
- 'dummy_method',
- deadline);
- call.startInvoke(function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.INVOKE_ACCEPTED);
+ var server = new grpc.Server();
+ var done = multiDone(function() {
+ complete();
+ server.shutdown();
+ }, 2);
+ var port_num = server.addHttp2Port('0.0.0.0:0');
+ var channel = new grpc.Channel('localhost:' + port_num);
+ var deadline = new Date();
+ deadline.setSeconds(deadline.getSeconds() + 3);
+ var status_text = 'xyz';
+ var call = new grpc.Call(channel,
+ 'dummy_method',
+ deadline);
+ call.startInvoke(function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.INVOKE_ACCEPTED);
- call.writesDone(function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.FINISH_ACCEPTED);
- assert.strictEqual(event.data, grpc.opError.OK);
- });
- },function(event) {
+ call.writesDone(function(event) {
assert.strictEqual(event.type,
- grpc.completionType.CLIENT_METADATA_READ);
- },function(event) {
+ grpc.completionType.FINISH_ACCEPTED);
+ assert.strictEqual(event.data, grpc.opError.OK);
+ });
+ },function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.CLIENT_METADATA_READ);
+ },function(event) {
+ assert.strictEqual(event.type, grpc.completionType.FINISHED);
+ var status = event.data;
+ assert.strictEqual(status.code, grpc.status.OK);
+ assert.strictEqual(status.details, status_text);
+ done();
+ }, 0);
+
+ server.start();
+ server.requestCall(function(event) {
+ assert.strictEqual(event.type, grpc.completionType.SERVER_RPC_NEW);
+ var server_call = event.call;
+ assert.notEqual(server_call, null);
+ server_call.serverAccept(function(event) {
assert.strictEqual(event.type, grpc.completionType.FINISHED);
- var status = event.data;
- assert.strictEqual(status.code, grpc.status.OK);
- assert.strictEqual(status.details, status_text);
- done();
}, 0);
+ server_call.serverEndInitialMetadata(0);
+ server_call.startWriteStatus(
+ grpc.status.OK,
+ status_text,
+ function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.FINISH_ACCEPTED);
+ assert.strictEqual(event.data, grpc.opError.OK);
+ done();
+ });
+ });
+ });
- server.start();
- server.requestCall(function(event) {
- assert.strictEqual(event.type, grpc.completionType.SERVER_RPC_NEW);
- var server_call = event.call;
- assert.notEqual(server_call, null);
- server_call.serverAccept(function(event) {
- assert.strictEqual(event.type, grpc.completionType.FINISHED);
- }, 0);
- server_call.serverEndInitialMetadata(0);
- server_call.startWriteStatus(
- grpc.status.OK,
- status_text,
- function(event) {
+ it('should send and receive data without error', function(complete) {
+ var req_text = 'client_request';
+ var reply_text = 'server_response';
+ var server = new grpc.Server();
+ var done = multiDone(function() {
+ complete();
+ server.shutdown();
+ }, 6);
+ var port_num = server.addHttp2Port('0.0.0.0:0');
+ var channel = new grpc.Channel('localhost:' + port_num);
+ var deadline = new Date();
+ deadline.setSeconds(deadline.getSeconds() + 3);
+ var status_text = 'success';
+ var call = new grpc.Call(channel,
+ 'dummy_method',
+ deadline);
+ call.startInvoke(function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.INVOKE_ACCEPTED);
+ call.startWrite(
+ new Buffer(req_text),
+ function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.WRITE_ACCEPTED);
+ assert.strictEqual(event.data, grpc.opError.OK);
+ call.writesDone(function(event) {
assert.strictEqual(event.type,
grpc.completionType.FINISH_ACCEPTED);
assert.strictEqual(event.data, grpc.opError.OK);
done();
});
+ }, 0);
+ call.startRead(function(event) {
+ assert.strictEqual(event.type, grpc.completionType.READ);
+ assert.strictEqual(event.data.toString(), reply_text);
+ done();
});
- });
- });
+ },function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.CLIENT_METADATA_READ);
+ done();
+ },function(event) {
+ assert.strictEqual(event.type, grpc.completionType.FINISHED);
+ var status = event.data;
+ assert.strictEqual(status.code, grpc.status.OK);
+ assert.strictEqual(status.details, status_text);
+ done();
+ }, 0);
- it('should send and receive data without error', function(complete) {
- port_picker.nextAvailablePort(function(port) {
- var req_text = 'client_request';
- var reply_text = 'server_response';
- var server = new grpc.Server();
- var done = multiDone(function() {
- complete();
- server.shutdown();
- }, 6);
- server.addHttp2Port(port);
- var channel = new grpc.Channel(port);
- var deadline = new Date();
- deadline.setSeconds(deadline.getSeconds() + 3);
- var status_text = 'success';
- var call = new grpc.Call(channel,
- 'dummy_method',
- deadline);
- call.startInvoke(function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.INVOKE_ACCEPTED);
- call.startWrite(
- new Buffer(req_text),
+ server.start();
+ server.requestCall(function(event) {
+ assert.strictEqual(event.type, grpc.completionType.SERVER_RPC_NEW);
+ var server_call = event.call;
+ assert.notEqual(server_call, null);
+ server_call.serverAccept(function(event) {
+ assert.strictEqual(event.type, grpc.completionType.FINISHED);
+ done();
+ });
+ server_call.serverEndInitialMetadata(0);
+ server_call.startRead(function(event) {
+ assert.strictEqual(event.type, grpc.completionType.READ);
+ assert.strictEqual(event.data.toString(), req_text);
+ server_call.startWrite(
+ new Buffer(reply_text),
function(event) {
assert.strictEqual(event.type,
grpc.completionType.WRITE_ACCEPTED);
- assert.strictEqual(event.data, grpc.opError.OK);
- call.writesDone(function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.FINISH_ACCEPTED);
- assert.strictEqual(event.data, grpc.opError.OK);
- done();
- });
+ assert.strictEqual(event.data,
+ grpc.opError.OK);
+ server_call.startWriteStatus(
+ grpc.status.OK,
+ status_text,
+ function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.FINISH_ACCEPTED);
+ assert.strictEqual(event.data, grpc.opError.OK);
+ done();
+ });
}, 0);
- call.startRead(function(event) {
- assert.strictEqual(event.type, grpc.completionType.READ);
- assert.strictEqual(event.data.toString(), reply_text);
- done();
- });
- },function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.CLIENT_METADATA_READ);
- done();
- },function(event) {
- assert.strictEqual(event.type, grpc.completionType.FINISHED);
- var status = event.data;
- assert.strictEqual(status.code, grpc.status.OK);
- assert.strictEqual(status.details, status_text);
- done();
- }, 0);
-
- server.start();
- server.requestCall(function(event) {
- assert.strictEqual(event.type, grpc.completionType.SERVER_RPC_NEW);
- var server_call = event.call;
- assert.notEqual(server_call, null);
- server_call.serverAccept(function(event) {
- assert.strictEqual(event.type, grpc.completionType.FINISHED);
- done();
- });
- server_call.serverEndInitialMetadata(0);
- server_call.startRead(function(event) {
- assert.strictEqual(event.type, grpc.completionType.READ);
- assert.strictEqual(event.data.toString(), req_text);
- server_call.startWrite(
- new Buffer(reply_text),
- function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.WRITE_ACCEPTED);
- assert.strictEqual(event.data,
- grpc.opError.OK);
- server_call.startWriteStatus(
- grpc.status.OK,
- status_text,
- function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.FINISH_ACCEPTED);
- assert.strictEqual(event.data, grpc.opError.OK);
- done();
- });
- }, 0);
- });
});
});
});
diff --git a/src/node/test/interop_sanity_test.js b/src/node/test/interop_sanity_test.js
new file mode 100644
index 0000000000..410b050e8d
--- /dev/null
+++ b/src/node/test/interop_sanity_test.js
@@ -0,0 +1,71 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+var interop_server = require('../interop/interop_server.js');
+var interop_client = require('../interop/interop_client.js');
+
+var server;
+
+var port;
+
+var name_override = 'foo.test.google.com';
+
+describe('Interop tests', function() {
+ before(function(done) {
+ var server_obj = interop_server.getServer(0, true);
+ server = server_obj.server;
+ server.listen();
+ port = 'localhost:' + server_obj.port;
+ done();
+ });
+ // This depends on not using a binary stream
+ it('should pass empty_unary', function(done) {
+ interop_client.runTest(port, name_override, 'empty_unary', true, done);
+ });
+ it('should pass large_unary', function(done) {
+ interop_client.runTest(port, name_override, 'large_unary', true, done);
+ });
+ it('should pass client_streaming', function(done) {
+ interop_client.runTest(port, name_override, 'client_streaming', true, done);
+ });
+ it('should pass server_streaming', function(done) {
+ interop_client.runTest(port, name_override, 'server_streaming', true, done);
+ });
+ it('should pass ping_pong', function(done) {
+ interop_client.runTest(port, name_override, 'ping_pong', true, done);
+ });
+ // This depends on the new invoke API
+ it.skip('should pass empty_stream', function(done) {
+ interop_client.runTest(port, name_override, 'empty_stream', true, done);
+ });
+});
diff --git a/src/node/test/math_client_test.js b/src/node/test/math_client_test.js
index 45c956d179..0e365bf870 100644
--- a/src/node/test/math_client_test.js
+++ b/src/node/test/math_client_test.js
@@ -32,7 +32,6 @@
*/
var assert = require('assert');
-var port_picker = require('../port_picker');
var grpc = require('..');
var math = grpc.load(__dirname + '/../examples/math.proto').math;
@@ -50,18 +49,17 @@ var server = require('../examples/math_server.js');
describe('Math client', function() {
before(function(done) {
- port_picker.nextAvailablePort(function(port) {
- server.bind(port).listen();
- math_client = new math.Math(port);
- done();
- });
+ var port_num = server.bind('0.0.0.0:0');
+ server.listen();
+ math_client = new math.Math('localhost:' + port_num);
+ done();
});
after(function() {
server.shutdown();
});
it('should handle a single request', function(done) {
var arg = {dividend: 7, divisor: 4};
- var call = math_client.Div(arg, function handleDivResult(err, value) {
+ var call = math_client.div(arg, function handleDivResult(err, value) {
assert.ifError(err);
assert.equal(value.quotient, 1);
assert.equal(value.remainder, 3);
@@ -72,7 +70,7 @@ describe('Math client', function() {
});
});
it('should handle a server streaming request', function(done) {
- var call = math_client.Fib({limit: 7});
+ var call = math_client.fib({limit: 7});
var expected_results = [1, 1, 2, 3, 5, 8, 13];
var next_expected = 0;
call.on('data', function checkResponse(value) {
@@ -85,7 +83,7 @@ describe('Math client', function() {
});
});
it('should handle a client streaming request', function(done) {
- var call = math_client.Sum(function handleSumResult(err, value) {
+ var call = math_client.sum(function handleSumResult(err, value) {
assert.ifError(err);
assert.equal(value.num, 21);
});
@@ -103,7 +101,7 @@ describe('Math client', function() {
assert.equal(value.quotient, index);
assert.equal(value.remainder, 1);
}
- var call = math_client.DivMany();
+ var call = math_client.divMany();
var response_index = 0;
call.on('data', function(value) {
checkResponse(response_index, value);
diff --git a/src/node/test/server_test.js b/src/node/test/server_test.js
index 79f7b32948..61aef4677e 100644
--- a/src/node/test/server_test.js
+++ b/src/node/test/server_test.js
@@ -34,7 +34,6 @@
var assert = require('assert');
var grpc = require('bindings')('grpc.node');
var Server = require('../server');
-var port_picker = require('../port_picker');
/**
* This is used for testing functions with multiple asynchronous calls that
@@ -68,54 +67,52 @@ function echoHandler(stream) {
describe('echo server', function() {
it('should echo inputs as responses', function(done) {
done = multiDone(done, 4);
- port_picker.nextAvailablePort(function(port) {
- var server = new Server();
- server.bind(port);
- server.register('echo', echoHandler);
- server.start();
+ var server = new Server();
+ var port_num = server.bind('[::]:0');
+ server.register('echo', echoHandler);
+ server.start();
- var req_text = 'echo test string';
- var status_text = 'OK';
+ var req_text = 'echo test string';
+ var status_text = 'OK';
- var channel = new grpc.Channel(port);
- var deadline = new Date();
- deadline.setSeconds(deadline.getSeconds() + 3);
- var call = new grpc.Call(channel,
- 'echo',
- deadline);
- call.startInvoke(function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.INVOKE_ACCEPTED);
- call.startWrite(
- new Buffer(req_text),
- function(event) {
+ var channel = new grpc.Channel('localhost:' + port_num);
+ var deadline = new Date();
+ deadline.setSeconds(deadline.getSeconds() + 3);
+ var call = new grpc.Call(channel,
+ 'echo',
+ deadline);
+ call.startInvoke(function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.INVOKE_ACCEPTED);
+ call.startWrite(
+ new Buffer(req_text),
+ function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.WRITE_ACCEPTED);
+ assert.strictEqual(event.data, grpc.opError.OK);
+ call.writesDone(function(event) {
assert.strictEqual(event.type,
- grpc.completionType.WRITE_ACCEPTED);
+ grpc.completionType.FINISH_ACCEPTED);
assert.strictEqual(event.data, grpc.opError.OK);
- call.writesDone(function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.FINISH_ACCEPTED);
- assert.strictEqual(event.data, grpc.opError.OK);
- done();
- });
- }, 0);
- call.startRead(function(event) {
- assert.strictEqual(event.type, grpc.completionType.READ);
- assert.strictEqual(event.data.toString(), req_text);
- done();
- });
- },function(event) {
- assert.strictEqual(event.type,
- grpc.completionType.CLIENT_METADATA_READ);
+ done();
+ });
+ }, 0);
+ call.startRead(function(event) {
+ assert.strictEqual(event.type, grpc.completionType.READ);
+ assert.strictEqual(event.data.toString(), req_text);
done();
- },function(event) {
- assert.strictEqual(event.type, grpc.completionType.FINISHED);
- var status = event.data;
- assert.strictEqual(status.code, grpc.status.OK);
- assert.strictEqual(status.details, status_text);
- server.shutdown();
- done();
- }, 0);
- });
+ });
+ },function(event) {
+ assert.strictEqual(event.type,
+ grpc.completionType.CLIENT_METADATA_READ);
+ done();
+ },function(event) {
+ assert.strictEqual(event.type, grpc.completionType.FINISHED);
+ var status = event.data;
+ assert.strictEqual(status.code, grpc.status.OK);
+ assert.strictEqual(status.details, status_text);
+ server.shutdown();
+ done();
+ }, 0);
});
});
diff --git a/src/node/test/surface_test.js b/src/node/test/surface_test.js
index 8d0d8ec3bc..34f1a156eb 100644
--- a/src/node/test/surface_test.js
+++ b/src/node/test/surface_test.js
@@ -59,9 +59,9 @@ describe('Surface server constructor', function() {
assert.throws(function() {
new Server({
'math.Math': {
- 'Div': function() {},
- 'DivMany': function() {},
- 'Fib': function() {}
+ 'div': function() {},
+ 'divMany': function() {},
+ 'fib': function() {}
}
});
}, /math.Math.Sum/);
diff --git a/src/php/ext/grpc/credentials.c b/src/php/ext/grpc/credentials.c
index 2a83d1cbc1..46c825a48f 100644
--- a/src/php/ext/grpc/credentials.c
+++ b/src/php/ext/grpc/credentials.c
@@ -77,24 +77,25 @@ PHP_METHOD(Credentials, createDefault) {
*/
PHP_METHOD(Credentials, createSsl) {
char *pem_root_certs;
- char *pem_private_key = NULL;
- char *pem_cert_chain = NULL;
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair;
int root_certs_length, private_key_length = 0, cert_chain_length = 0;
+ pem_key_cert_pair.private_key = pem_key_cert_pair.cert_chain = NULL;
+
/* "s|s!s! == 1 string, 2 optional nullable strings */
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|s!s!",
&pem_root_certs, &root_certs_length,
- &pem_private_key, &private_key_length,
- &pem_cert_chain, &cert_chain_length) == FAILURE) {
+ &pem_key_cert_pair.private_key, &private_key_length,
+ &pem_key_cert_pair.cert_chain,
+ &cert_chain_length) == FAILURE) {
zend_throw_exception(spl_ce_InvalidArgumentException,
"createSsl expects 1 to 3 strings", 1 TSRMLS_CC);
return;
}
grpc_credentials *creds = grpc_ssl_credentials_create(
- (unsigned char *)pem_root_certs, (size_t)root_certs_length,
- (unsigned char *)pem_private_key, (size_t)private_key_length,
- (unsigned char *)pem_cert_chain, (size_t)cert_chain_length);
+ pem_root_certs,
+ pem_key_cert_pair.private_key == NULL ? NULL : &pem_key_cert_pair);
zval *creds_object = grpc_php_wrap_credentials(creds);
RETURN_DESTROY_ZVAL(creds_object);
}
diff --git a/src/php/ext/grpc/server_credentials.c b/src/php/ext/grpc/server_credentials.c
index 1f8e58aa4d..3d43d6a78c 100644
--- a/src/php/ext/grpc/server_credentials.c
+++ b/src/php/ext/grpc/server_credentials.c
@@ -66,24 +66,22 @@ zval *grpc_php_wrap_server_credentials(grpc_server_credentials *wrapped) {
*/
PHP_METHOD(ServerCredentials, createSsl) {
char *pem_root_certs = 0;
- char *pem_private_key;
- char *pem_cert_chain;
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair;
int root_certs_length = 0, private_key_length, cert_chain_length;
/* "s!ss" == 1 nullable string, 2 strings */
+ /* TODO: support multiple key cert pairs. */
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s!ss", &pem_root_certs,
- &root_certs_length, &pem_private_key,
- &private_key_length, &pem_cert_chain,
+ &root_certs_length, &pem_key_cert_pair.private_key,
+ &private_key_length, &pem_key_cert_pair.cert_chain,
&cert_chain_length) == FAILURE) {
zend_throw_exception(spl_ce_InvalidArgumentException,
"createSsl expects 3 strings", 1 TSRMLS_CC);
return;
}
- grpc_server_credentials *creds = grpc_ssl_server_credentials_create(
- (unsigned char *)pem_root_certs, (size_t)root_certs_length,
- (unsigned char *)pem_private_key, (size_t)private_key_length,
- (unsigned char *)pem_cert_chain, (size_t)cert_chain_length);
+ grpc_server_credentials *creds =
+ grpc_ssl_server_credentials_create(pem_root_certs, &pem_key_cert_pair, 1);
zval *creds_object = grpc_php_wrap_server_credentials(creds);
RETURN_DESTROY_ZVAL(creds_object);
}
diff --git a/src/python/__init__.py b/src/python/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/python/__init__.py
diff --git a/src/python/_framework/__init__.py b/src/python/_framework/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/python/_framework/__init__.py
diff --git a/src/python/_framework/foundation/__init__.py b/src/python/_framework/foundation/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/python/_framework/foundation/__init__.py
diff --git a/src/ruby/lib/grpc/beefcake.rb b/src/python/_framework/foundation/_logging_pool_test.py
index fd3ebbf4b8..ffe07c788d 100644
--- a/src/ruby/lib/grpc/beefcake.rb
+++ b/src/python/_framework/foundation/_logging_pool_test.py
@@ -1,4 +1,4 @@
-# Copyright 2014, Google Inc.
+# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -27,31 +27,38 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-require 'beefcake'
-
-module Beefcake
- # Re-open the beefcake message module to add a static encode
- #
- # This is a temporary measure while beefcake is used as the default proto
- # library for developing grpc ruby. Once that changes to the official proto
- # library this can be removed. It's necessary to allow the update the service
- # module to assume a static encode method.
- # TODO(temiola): remove this.
- module Message
- # additional mixin module that adds static encode method when include
- module StaticEncode
- # encodes o with its instance#encode method
- def encode(o)
- o.encode
- end
- end
-
- # extend self.included in Beefcake::Message to include StaticEncode
- def self.included(o)
- o.extend StaticEncode
- o.extend Dsl
- o.extend Decode
- o.send(:include, Encode)
- end
- end
-end
+"""Tests for google3.net.rpc.python.framework.foundation.logging_pool."""
+
+import unittest
+
+from _framework.foundation import logging_pool
+
+_POOL_SIZE = 16
+
+
+class LoggingPoolTest(unittest.TestCase):
+
+ def testUpAndDown(self):
+ pool = logging_pool.pool(_POOL_SIZE)
+ pool.shutdown(wait=True)
+
+ with logging_pool.pool(_POOL_SIZE) as pool:
+ self.assertIsNotNone(pool)
+
+ def testTaskExecuted(self):
+ test_list = []
+
+ with logging_pool.pool(_POOL_SIZE) as pool:
+ pool.submit(lambda: test_list.append(object())).result()
+
+ self.assertTrue(test_list)
+
+ def testException(self):
+ with logging_pool.pool(_POOL_SIZE) as pool:
+ raised_exception = pool.submit(lambda: 1/0).exception()
+
+ self.assertIsNotNone(raised_exception)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/src/python/_framework/foundation/logging_pool.py b/src/python/_framework/foundation/logging_pool.py
new file mode 100644
index 0000000000..7c7a6eebfc
--- /dev/null
+++ b/src/python/_framework/foundation/logging_pool.py
@@ -0,0 +1,83 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A thread pool that logs exceptions raised by tasks executed within it."""
+
+import functools
+import logging
+
+from concurrent import futures
+
+
+def _wrap(behavior):
+ """Wraps an arbitrary callable behavior in exception-logging."""
+ @functools.wraps(behavior)
+ def _wrapping(*args, **kwargs):
+ try:
+ return behavior(*args, **kwargs)
+ except Exception as e:
+ logging.exception('Unexpected exception from task run in logging pool!')
+ raise
+ return _wrapping
+
+
+class _LoggingPool(object):
+ """An exception-logging futures.ThreadPoolExecutor-compatible thread pool."""
+
+ def __init__(self, backing_pool):
+ self._backing_pool = backing_pool
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._backing_pool.shutdown(wait=True)
+
+ def submit(self, fn, *args, **kwargs):
+ return self._backing_pool.submit(_wrap(fn), *args, **kwargs)
+
+ def map(self, func, *iterables, **kwargs):
+ return self._backing_pool.map(
+ _wrap(func), *iterables, timeout=kwargs.get('timeout', None))
+
+ def shutdown(self, wait=True):
+ self._backing_pool.shutdown(wait=wait)
+
+
+def pool(max_workers):
+ """Creates a thread pool that logs exceptions raised by the tasks within it.
+
+ Args:
+ max_workers: The maximum number of worker threads to allow the pool.
+
+ Returns:
+ A futures.ThreadPoolExecutor-compatible thread pool that logs exceptions
+ raised by the tasks executed within it.
+ """
+ return _LoggingPool(futures.ThreadPoolExecutor(max_workers))
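The new logging_pool module is exercised only through its unit test above; as a rough usage sketch (assuming the `_framework.foundation` package layout added in this change, and that the `futures` backport is available on Python 2), the pool behaves like a regular `ThreadPoolExecutor` whose tasks additionally log their exceptions:

```python
# Minimal sketch, not part of the change: exceptions raised by submitted
# tasks are logged by the pool's wrapper and still surface on the future.
from _framework.foundation import logging_pool

with logging_pool.pool(4) as pool:
    future = pool.submit(lambda: 1 / 0)
    print(future.exception())  # ZeroDivisionError, already logged by the pool
```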
diff --git a/src/ruby/README.md b/src/ruby/README.md
index 23aec2b20a..7f7558dc67 100755
--- a/src/ruby/README.md
+++ b/src/ruby/README.md
@@ -1,64 +1,63 @@
-Ruby for GRPC
-=============
+gRPC Ruby
+=========
-LAYOUT
-------
+A Ruby implementation of gRPC, Google's RPC library.
-Directory structure is the recommended layout for [ruby extensions](http://guides.rubygems.org/gems-with-extensions/)
- * ext: the extension code
- * lib: the entrypoint grpc ruby library to be used in a 'require' statement
- * test: tests
+INSTALLATION PREREQUISITES
+--------------------------
+This requires Ruby 2.x, as the RPC API surface uses keyword args.
-DEPENDENCIES
-------------
+INSTALLING
+----------
-* Extension
+- Install the gRPC core library
+TODO: describe this, once the core distribution mechanism is defined.
-The extension can be built and tested using
-[rake](https://rubygems.org/gems/rake). However, the rake-extensiontask rule
-is not supported on older versions of rubygems, and the necessary version of
-rubygems.
+$ gem install grpc
-This is resolved by using [RVM](https://rvm.io/) instead; install a single-user
-ruby environment, and develop on the latest stable version of ruby (2.1.5).
+Installing from source
+----------------------
-INSTALLATION PREREQUISITES
---------------------------
-
-Install RVM
+- Build or Install the gRPC core
+E.g., from the root of the grpc [git repo](https://github.com/google/grpc)
+$ cd ../..
+$ make && sudo make install
+- Install Ruby 2.x. Consider doing this with [RVM](http://rvm.io); it's a nice way of controlling
+ the exact Ruby version that's used.
$ command curl -sSL https://rvm.io/mpapis.asc | gpg --import -
$ \curl -sSL https://get.rvm.io | bash -s stable --ruby
$
$ # follow the instructions to ensure that you're using the latest stable version of Ruby
$ # and that the rvm command is installed
-$
-$ gem install bundler # install bundler, the standard ruby package manager
-HACKING
--------
+- Install [bundler](http://bundler.io/)
+$ gem install bundler
-The extension can be built and tested using the Rakefile.
+- Finally, install grpc ruby locally.
+$ cd <install_dir>
+$ bundle install
+$ rake # compiles the extension, runs the unit tests, see rake -T for other options
-$ # create a workspace
-$ git5 start <your-git5-branch> net/grpc
-$
-$ # build the C library and install it in $HOME/grpc_dev
-$ <google3>/net/grpc/c/build_gyp/build_grpc_dev.sh
-$
-$ # build the ruby extension and test it.
-$ cd google3_dir/net/grpc/ruby
-$ rake
-Finally, install grpc ruby locally.
+CONTENTS
+--------
-$ cd <this_dir>
-$
-$ # update the Gemfile, modify the line beginning # gem 'beefcake' to refer to
-$ # the patched beefcake dir
-$
-$ bundle install
+Directory structure is the layout for [ruby extensions](http://guides.rubygems.org/gems-with-extensions/)
+
+ * ext: the extension code
+ * lib: the entrypoint grpc ruby library to be used in a 'require' statement
+ * spec: tests
+ * bin: example gRPC clients and servers, e.g.,
+```ruby
+# client
+stub = Math::Math::Stub.new('my.test.math.server.com:8080')
+req = Math::DivArgs.new(dividend: 7, divisor: 3)
+logger.info("div(7/3): req=#{req.inspect}")
+resp = stub.div(req, INFINITE_FUTURE)
+logger.info("Answer: #{resp.inspect}")
+```
diff --git a/src/ruby/bin/interop/README.md b/src/ruby/bin/interop/README.md
index 04020868a4..84fc663620 100755
--- a/src/ruby/bin/interop/README.md
+++ b/src/ruby/bin/interop/README.md
@@ -1,11 +1,8 @@
Interop test protos
===================
-These were generated by a patched version of beefcake and a patched version of
-protoc.
+These Ruby classes were generated with protoc v3, using gRPC's Ruby compiler
+plugin.
-- set up and access of the patched versions is described in ../../README.md
-
-The actual test proto is found in Google3 at
-
-- third_party/stubby/testing/proto/test.proto
+- As of 2015/01 protoc v3 is available in the
+[google-protobuf](https://github.com/google/protobuf) repo
diff --git a/src/ruby/bin/interop/interop_client.rb b/src/ruby/bin/interop/interop_client.rb
index 0ce10d9e30..0ea7f376be 100755
--- a/src/ruby/bin/interop/interop_client.rb
+++ b/src/ruby/bin/interop/interop_client.rb
@@ -107,11 +107,11 @@ class PingPongPlayer
@msg_sizes.each do |m|
req_size, resp_size = m
req = req_cls.new(payload: Payload.new(body: nulls(req_size)),
- response_type: COMPRESSABLE,
+ response_type: :COMPRESSABLE,
response_parameters: [p_cls.new(size: resp_size)])
yield req
resp = @queue.pop
- assert_equal(PayloadType.lookup(COMPRESSABLE), resp.payload.type,
+ assert_equal(:COMPRESSABLE, resp.payload.type,
'payload type is wrong')
assert_equal(resp_size, resp.payload.body.length,
'payload body #{i} has the wrong length')
@@ -149,11 +149,13 @@ class NamedTests
# FAILED
def large_unary
req_size, wanted_response_size = 271_828, 314_159
- payload = Payload.new(type: COMPRESSABLE, body: nulls(req_size))
- req = SimpleRequest.new(response_type: COMPRESSABLE,
+ payload = Payload.new(type: :COMPRESSABLE, body: nulls(req_size))
+ req = SimpleRequest.new(response_type: :COMPRESSABLE,
response_size: wanted_response_size,
payload: payload)
resp = @stub.unary_call(req)
+ assert_equal(:COMPRESSABLE, resp.payload.type,
+ 'large_unary: payload had the wrong type')
assert_equal(wanted_response_size, resp.payload.body.length,
'large_unary: payload had the wrong length')
assert_equal(nulls(wanted_response_size), resp.payload.body,
@@ -185,12 +187,12 @@ class NamedTests
def server_streaming
msg_sizes = [31_415, 9, 2653, 58_979]
response_spec = msg_sizes.map { |s| ResponseParameters.new(size: s) }
- req = StreamingOutputCallRequest.new(response_type: COMPRESSABLE,
+ req = StreamingOutputCallRequest.new(response_type: :COMPRESSABLE,
response_parameters: response_spec)
resps = @stub.streaming_output_call(req)
resps.each_with_index do |r, i|
assert i < msg_sizes.length, 'too many responses'
- assert_equal(PayloadType.lookup(COMPRESSABLE), r.payload.type,
+ assert_equal(:COMPRESSABLE, r.payload.type,
'payload type is wrong')
assert_equal(msg_sizes[i], r.payload.body.length,
'payload body #{i} has the wrong length')
@@ -235,7 +237,7 @@ def parse_options
end
end.parse!
- %w(server_host, server_port, test_case).each do |arg|
+ %w(server_host server_port test_case).each do |arg|
if options[arg].nil?
fail(OptionParser::MissingArgument, "please specify --#{arg}")
end
diff --git a/src/ruby/bin/interop/interop_server.rb b/src/ruby/bin/interop/interop_server.rb
index 9273dcdf91..83212823f6 100755
--- a/src/ruby/bin/interop/interop_server.rb
+++ b/src/ruby/bin/interop/interop_server.rb
@@ -104,7 +104,7 @@ class TestTarget < Grpc::Testing::TestService::Service
def unary_call(simple_req, _call)
req_size = simple_req.response_size
- SimpleResponse.new(payload: Payload.new(type: COMPRESSABLE,
+ SimpleResponse.new(payload: Payload.new(type: :COMPRESSABLE,
body: nulls(req_size)))
end
@@ -145,8 +145,8 @@ class TestTarget < Grpc::Testing::TestService::Service
end
def half_duplex_call(reqs)
- # TODO(temiola): clarify the behaviour of the half_duplex_call, it's not
- # currently used in any tests
+ # TODO: update with unique behaviour of the half_duplex_call if that's
+ # ever required by any of the tests.
full_duplex_call(reqs)
end
end
diff --git a/src/ruby/ext/grpc/extconf.rb b/src/ruby/ext/grpc/extconf.rb
index e948504e9e..cbf41eda8b 100644
--- a/src/ruby/ext/grpc/extconf.rb
+++ b/src/ruby/ext/grpc/extconf.rb
@@ -68,13 +68,9 @@ $CFLAGS << ' -Wno-return-type '
$CFLAGS << ' -Wall '
$CFLAGS << ' -pedantic '
-$LDFLAGS << ' -lgrpc -lgpr -levent -levent_pthreads -levent_core'
-
-# crash('need grpc lib') unless have_library('grpc', 'grpc_channel_destroy')
-#
-# TODO(temiola): figure out why this stopped working, but the so is built OK
-# and the tests pass
+$LDFLAGS << ' -lgrpc -lgpr -ldl'
+crash('need grpc lib') unless have_library('grpc', 'grpc_channel_destroy')
have_library('grpc', 'grpc_channel_destroy')
crash('need gpr lib') unless have_library('gpr', 'gpr_now')
create_makefile('grpc/grpc')
diff --git a/src/ruby/ext/grpc/rb_completion_queue.c b/src/ruby/ext/grpc/rb_completion_queue.c
index c1b74e2606..47776a991a 100644
--- a/src/ruby/ext/grpc/rb_completion_queue.c
+++ b/src/ruby/ext/grpc/rb_completion_queue.c
@@ -75,7 +75,7 @@ static void grpc_rb_completion_queue_shutdown_drain(grpc_completion_queue *cq) {
grpc_completion_queue_shutdown(cq);
next_call.cq = cq;
next_call.event = NULL;
- /* TODO(temiola): the timeout should be a module level constant that defaults
+ /* TODO: the timeout should be a module level constant that defaults
* to gpr_inf_future.
*
* - at the moment this does not work, it stalls. Using a small timeout like
diff --git a/src/ruby/ext/grpc/rb_credentials.c b/src/ruby/ext/grpc/rb_credentials.c
index 5dec51824d..31f47f3b76 100644
--- a/src/ruby/ext/grpc/rb_credentials.c
+++ b/src/ruby/ext/grpc/rb_credentials.c
@@ -214,6 +214,7 @@ static VALUE grpc_rb_credentials_init(int argc, VALUE *argv, VALUE self) {
VALUE pem_cert_chain = Qnil;
grpc_rb_credentials *wrapper = NULL;
grpc_credentials *creds = NULL;
+ /* TODO: Remove mandatory arg when we support default roots. */
/* "12" == 1 mandatory arg, 2 (credentials) is optional */
rb_scan_args(argc, argv, "12", &pem_root_certs, &pem_private_key,
&pem_cert_chain);
@@ -225,22 +226,12 @@ static VALUE grpc_rb_credentials_init(int argc, VALUE *argv, VALUE self) {
return Qnil;
}
if (pem_private_key == Qnil && pem_cert_chain == Qnil) {
- creds = grpc_ssl_credentials_create(RSTRING_PTR(pem_root_certs),
- RSTRING_LEN(pem_root_certs), NULL, 0,
- NULL, 0);
- } else if (pem_cert_chain == Qnil) {
- creds = grpc_ssl_credentials_create(
- RSTRING_PTR(pem_root_certs), RSTRING_LEN(pem_root_certs),
- RSTRING_PTR(pem_private_key), RSTRING_LEN(pem_private_key),
- RSTRING_PTR(pem_cert_chain), RSTRING_LEN(pem_cert_chain));
- } else if (pem_private_key == Qnil) {
- creds = grpc_ssl_credentials_create(
- RSTRING_PTR(pem_root_certs), RSTRING_LEN(pem_root_certs), NULL, 0,
- RSTRING_PTR(pem_cert_chain), RSTRING_LEN(pem_cert_chain));
+ creds = grpc_ssl_credentials_create(RSTRING_PTR(pem_root_certs), NULL);
} else {
+ grpc_ssl_pem_key_cert_pair key_cert_pair = {RSTRING_PTR(pem_private_key),
+ RSTRING_PTR(pem_cert_chain)};
creds = grpc_ssl_credentials_create(
- RSTRING_PTR(pem_root_certs), RSTRING_LEN(pem_root_certs),
- RSTRING_PTR(pem_private_key), RSTRING_LEN(pem_private_key), NULL, 0);
+ RSTRING_PTR(pem_root_certs), &key_cert_pair);
}
if (creds == NULL) {
rb_raise(rb_eRuntimeError, "could not create a credentials, not sure why");
diff --git a/src/ruby/ext/grpc/rb_server_credentials.c b/src/ruby/ext/grpc/rb_server_credentials.c
index e534c11444..4f6c67ea5e 100644
--- a/src/ruby/ext/grpc/rb_server_credentials.c
+++ b/src/ruby/ext/grpc/rb_server_credentials.c
@@ -145,8 +145,10 @@ static ID id_pem_cert_chain;
static VALUE grpc_rb_server_credentials_init(VALUE self, VALUE pem_root_certs,
VALUE pem_private_key,
VALUE pem_cert_chain) {
+ /* TODO support multiple key cert pairs in the ruby API. */
grpc_rb_server_credentials *wrapper = NULL;
grpc_server_credentials *creds = NULL;
+ grpc_ssl_pem_key_cert_pair key_cert_pair = {NULL, NULL};
Data_Get_Struct(self, grpc_rb_server_credentials, wrapper);
if (pem_cert_chain == Qnil) {
rb_raise(rb_eRuntimeError,
@@ -157,15 +159,13 @@ static VALUE grpc_rb_server_credentials_init(VALUE self, VALUE pem_root_certs,
"could not create a server credential: nil pem_private_key");
return Qnil;
}
+ key_cert_pair.private_key = RSTRING_PTR(pem_private_key);
+ key_cert_pair.cert_chain = RSTRING_PTR(pem_cert_chain);
if (pem_root_certs == Qnil) {
- creds = grpc_ssl_server_credentials_create(
- NULL, 0, RSTRING_PTR(pem_private_key), RSTRING_LEN(pem_private_key),
- RSTRING_PTR(pem_cert_chain), RSTRING_LEN(pem_cert_chain));
+ creds = grpc_ssl_server_credentials_create(NULL, &key_cert_pair, 1);
} else {
- creds = grpc_ssl_server_credentials_create(
- RSTRING_PTR(pem_root_certs), RSTRING_LEN(pem_root_certs),
- RSTRING_PTR(pem_private_key), RSTRING_LEN(pem_private_key),
- RSTRING_PTR(pem_cert_chain), RSTRING_LEN(pem_cert_chain));
+ creds = grpc_ssl_server_credentials_create(RSTRING_PTR(pem_root_certs),
+ &key_cert_pair, 1);
}
if (creds == NULL) {
rb_raise(rb_eRuntimeError, "could not create a credentials, not sure why");
diff --git a/src/ruby/grpc.gemspec b/src/ruby/grpc.gemspec
index 8d7f44f30e..450362f5a8 100755
--- a/src/ruby/grpc.gemspec
+++ b/src/ruby/grpc.gemspec
@@ -5,11 +5,11 @@ require 'grpc/version'
Gem::Specification.new do |s|
s.name = 'grpc'
s.version = Google::RPC::VERSION
- s.authors = ['One Platform Team']
- s.email = 'stubby-team@google.com'
- s.homepage = 'http://go/grpc'
+ s.authors = ['gRPC Authors']
+ s.email = 'tbetbetbe@gmail.com'
+ s.homepage = 'https://github.com/google/grpc/tree/master/src/ruby'
s.summary = 'Google RPC system in Ruby'
- s.description = 'Send RPCs from Ruby'
+ s.description = 'Send RPCs from Ruby using Google\'s RPC system'
s.files = `git ls-files`.split("\n")
s.test_files = `git ls-files -- spec/*`.split("\n")
diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb
index 14ef6c531f..36877dc648 100644
--- a/src/ruby/lib/grpc/generic/bidi_call.rb
+++ b/src/ruby/lib/grpc/generic/bidi_call.rb
@@ -142,7 +142,7 @@ module Google
# during bidi-streaming, read the requests to send from a separate thread
# read so that read_loop does not block waiting for requests to read.
def start_write_loop(requests, is_client: true)
- Thread.new do # TODO(temiola) run on a thread pool
+ Thread.new do # TODO: run on a thread pool
write_tag = Object.new
begin
count = 0
diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb
index 5ea3cc94d6..40c5ec118e 100644
--- a/src/ruby/lib/grpc/generic/rpc_server.rb
+++ b/src/ruby/lib/grpc/generic/rpc_server.rb
@@ -233,10 +233,6 @@ module Google
end
def new_active_server_call(call, new_server_rpc)
- # TODO(temiola): perhaps reuse the main server completion queue here,
- # but for now, create a new completion queue per call, pending best
- # practice usage advice from the c core.
-
# Accept the call. This is necessary even if a status is to be sent
# back immediately
finished_tag = Object.new
@@ -340,7 +336,7 @@ module Google
@workers.size.times { schedule { throw :exit } }
@stopped = true
- # TODO(temiola): allow configuration of the keepalive period
+ # TODO: allow configuration of the keepalive period
keep_alive = 5
@stop_mutex.synchronize do
@stop_cond.wait(@stop_mutex, keep_alive) if @workers.size > 0
diff --git a/src/ruby/lib/grpc/logconfig.rb b/src/ruby/lib/grpc/logconfig.rb
index 6d8e1899a0..6442f23e89 100644
--- a/src/ruby/lib/grpc/logconfig.rb
+++ b/src/ruby/lib/grpc/logconfig.rb
@@ -34,7 +34,7 @@ include Logging.globally # logger is accessible everywhere
Logging.logger.root.appenders = Logging.appenders.stdout
Logging.logger.root.level = :info
-# TODO(temiola): provide command-line configuration for logging
+# TODO: provide command-line configuration for logging
Logging.logger['Google::RPC'].level = :debug
Logging.logger['Google::RPC::ActiveCall'].level = :info
Logging.logger['Google::RPC::BidiCall'].level = :info
diff --git a/src/ruby/spec/client_server_spec.rb b/src/ruby/spec/client_server_spec.rb
index 1bcbc66446..df70e56bca 100644
--- a/src/ruby/spec/client_server_spec.rb
+++ b/src/ruby/spec/client_server_spec.rb
@@ -294,7 +294,7 @@ shared_examples 'GRPC metadata delivery works OK' do
expect_next_event_on(@server_queue, WRITE_ACCEPTED, @server_tag)
# there is the HTTP status metadata, though there should not be any
- # TODO(temiola): update this with the bug number to be resolved
+ # TODO: update this with the bug number to be resolved
ev = expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
expect(ev.result).to eq(':status' => '200')
end
diff --git a/src/ruby/spec/testdata/README b/src/ruby/spec/testdata/README
index ed72661e97..cb20dcb49f 100755
--- a/src/ruby/spec/testdata/README
+++ b/src/ruby/spec/testdata/README
@@ -1,4 +1 @@
These are test keys *NOT* to be used in production.
-http://go/keyhunt requires this README
-
-CONFIRMEDTESTKEY
diff --git a/templates/Makefile.template b/templates/Makefile.template
index 11b9438a68..b80e80ca81 100644
--- a/templates/Makefile.template
+++ b/templates/Makefile.template
@@ -19,6 +19,14 @@
return 'gens/' + m.group(1) + '.pb.cc'
%>
+
+# Basic platform detection
+HOST_SYSTEM = $(shell uname | cut -f 1 -d_)
+ifeq ($(SYSTEM),)
+SYSTEM = $(HOST_SYSTEM)
+endif
+
+
# Configurations
VALID_CONFIG_opt = 1
@@ -132,10 +140,15 @@ LDFLAGS += $(LDFLAGS_$(CONFIG))
CFLAGS += -std=c89 -pedantic
CXXFLAGS += -std=c++11
CPPFLAGS += -g -fPIC -Wall -Werror -Wno-long-long
-LDFLAGS += -g -pthread -fPIC
+LDFLAGS += -g -fPIC
INCLUDES = . include gens
+ifeq ($(SYSTEM),Darwin)
+LIBS = m z
+else
LIBS = rt m z pthread
+LDFLAGS += -pthread
+endif
LIBSXX = protobuf
LIBS_PROTOC = protoc protobuf
@@ -173,11 +186,6 @@ HOST_LDLIBS = $(LDLIBS)
# These are automatically computed variables.
# There shouldn't be any need to change anything from now on.
-HOST_SYSTEM = $(shell uname | cut -f 1 -d_)
-ifeq ($(SYSTEM),)
-SYSTEM = $(HOST_SYSTEM)
-endif
-
ifeq ($(SYSTEM),MINGW32)
SHARED_EXT = dll
endif
@@ -340,8 +348,12 @@ libs/$(CONFIG)/zlib/libz.a:
$(Q)cp third_party/zlib/libz.a libs/$(CONFIG)/zlib
libs/$(CONFIG)/openssl/libssl.a:
- $(E) "[MAKE] Building openssl"
+ $(E) "[MAKE] Building openssl for $(SYSTEM)"
+ifeq ($(SYSTEM),Darwin)
+ $(Q)(cd third_party/openssl ; CC="$(CC) -fPIC -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG))" ./Configure darwin64-x86_64-cc $(OPENSSL_CONFIG_$(CONFIG)))
+else
$(Q)(cd third_party/openssl ; CC="$(CC) -fPIC -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG))" ./config $(OPENSSL_CONFIG_$(CONFIG)))
+endif
$(Q)$(MAKE) -C third_party/openssl clean
$(Q)$(MAKE) -C third_party/openssl build_crypto build_ssl
$(Q)mkdir -p libs/$(CONFIG)/openssl
@@ -695,6 +707,7 @@ libs/$(CONFIG)/lib${lib.name}.a: $(ZLIB_DEP) $(LIB${lib.name.upper()}_OBJS)
% endif
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
+ $(Q) rm -f libs/$(CONFIG)/lib${lib.name}.a
$(Q) $(AR) rcs libs/$(CONFIG)/lib${lib.name}.a $(LIB${lib.name.upper()}_OBJS)
% if lib.get('baselib', False):
% if lib.get('secure', True):
@@ -707,6 +720,9 @@ libs/$(CONFIG)/lib${lib.name}.a: $(ZLIB_DEP) $(LIB${lib.name.upper()}_OBJS)
$(Q) rm -rf tmp-merge
% endif
% endif
+ifeq ($(SYSTEM),Darwin)
+ $(Q) ranlib libs/$(CONFIG)/lib${lib.name}.a
+endif
<%
if lib.language == 'c++':
diff --git a/test/core/end2end/data/prod_roots_certs.c b/test/core/end2end/data/prod_roots_certs.c
index 21a199ce37..3b66d236c3 100644
--- a/test/core/end2end/data/prod_roots_certs.c
+++ b/test/core/end2end/data/prod_roots_certs.c
@@ -31,7 +31,7 @@
*
*/
-unsigned char prod_roots_certs[] = {
+const char prod_roots_certs[] = {
0x23, 0x20, 0x49, 0x73, 0x73, 0x75, 0x65, 0x72, 0x3a, 0x20, 0x43, 0x4e,
0x3d, 0x47, 0x54, 0x45, 0x20, 0x43, 0x79, 0x62, 0x65, 0x72, 0x54, 0x72,
0x75, 0x73, 0x74, 0x20, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x20, 0x52,
@@ -11270,5 +11270,4 @@ unsigned char prod_roots_certs[] = {
0x33, 0x50, 0x59, 0x74, 0x6c, 0x4e, 0x58, 0x4c, 0x66, 0x62, 0x51, 0x34,
0x64, 0x64, 0x49, 0x0a, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x45, 0x4e, 0x44,
0x20, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45,
- 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a};
-unsigned int prod_roots_certs_size = 134862;
+ 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x00};
diff --git a/test/core/end2end/data/server1_cert.c b/test/core/end2end/data/server1_cert.c
index da1d36653c..134b9cb98e 100644
--- a/test/core/end2end/data/server1_cert.c
+++ b/test/core/end2end/data/server1_cert.c
@@ -31,7 +31,7 @@
*
*/
-unsigned char test_server1_cert[] = {
+const char test_server1_cert[] = {
0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x43,
0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x2d, 0x2d,
0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x49, 0x43, 0x6d, 0x7a, 0x43, 0x43,
@@ -112,5 +112,4 @@ unsigned char test_server1_cert[] = {
0x32, 0x77, 0x65, 0x2f, 0x4b, 0x44, 0x34, 0x6f, 0x6a, 0x66, 0x39, 0x73,
0x3d, 0x0a, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x45, 0x4e, 0x44, 0x20, 0x43,
0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x2d, 0x2d,
- 0x2d, 0x2d, 0x2d, 0x0a};
-unsigned int test_server1_cert_size = 964;
+ 0x2d, 0x2d, 0x2d, 0x0a, 0x00};
diff --git a/test/core/end2end/data/server1_key.c b/test/core/end2end/data/server1_key.c
index 3540505467..992d3c032a 100644
--- a/test/core/end2end/data/server1_key.c
+++ b/test/core/end2end/data/server1_key.c
@@ -31,7 +31,7 @@
*
*/
-unsigned char test_server1_key[] = {
+const char test_server1_key[] = {
0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x52,
0x53, 0x41, 0x20, 0x50, 0x52, 0x49, 0x56, 0x41, 0x54, 0x45, 0x20, 0x4b,
0x45, 0x59, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x49, 0x43,
@@ -105,5 +105,4 @@ unsigned char test_server1_key[] = {
0x6e, 0x68, 0x66, 0x66, 0x46, 0x79, 0x65, 0x37, 0x53, 0x42, 0x58, 0x79,
0x61, 0x67, 0x3d, 0x3d, 0x0a, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x45, 0x4e,
0x44, 0x20, 0x52, 0x53, 0x41, 0x20, 0x50, 0x52, 0x49, 0x56, 0x41, 0x54,
- 0x45, 0x20, 0x4b, 0x45, 0x59, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a};
-unsigned int test_server1_key_size = 887;
+ 0x45, 0x20, 0x4b, 0x45, 0x59, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x00};
diff --git a/test/core/end2end/data/ssl_test_data.h b/test/core/end2end/data/ssl_test_data.h
index 6ac19945b2..3456ebebd4 100644
--- a/test/core/end2end/data/ssl_test_data.h
+++ b/test/core/end2end/data/ssl_test_data.h
@@ -34,14 +34,10 @@
#ifndef __GRPC_TEST_END2END_DATA_SSL_TEST_DATA_H__
#define __GRPC_TEST_END2END_DATA_SSL_TEST_DATA_H__
-extern unsigned char test_root_cert[];
-extern unsigned int test_root_cert_size;
-extern unsigned char test_server1_cert[];
-extern unsigned int test_server1_cert_size;
-extern unsigned char test_server1_key[];
-extern unsigned int test_server1_key_size;
+extern const char test_root_cert[];
+extern const char test_server1_cert[];
+extern const char test_server1_key[];
-extern unsigned char prod_roots_certs[];
-extern unsigned int prod_roots_certs_size;
+extern const char prod_roots_certs[];
#endif /* __GRPC_TEST_END2END_DATA_SSL_TEST_DATA_H__ */
diff --git a/test/core/end2end/data/test_root_cert.c b/test/core/end2end/data/test_root_cert.c
index fd01953ccb..f358b0b79a 100644
--- a/test/core/end2end/data/test_root_cert.c
+++ b/test/core/end2end/data/test_root_cert.c
@@ -31,7 +31,7 @@
*
*/
-unsigned char test_root_cert[] = {
+const char test_root_cert[] = {
0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x43,
0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x2d, 0x2d,
0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x49, 0x43, 0x49, 0x7a, 0x43, 0x43,
@@ -98,5 +98,4 @@ unsigned char test_root_cert[] = {
0x31, 0x59, 0x75, 0x58, 0x32, 0x72, 0x6e, 0x65, 0x78, 0x30, 0x4a, 0x68,
0x75, 0x54, 0x51, 0x66, 0x63, 0x49, 0x3d, 0x0a, 0x2d, 0x2d, 0x2d, 0x2d,
0x2d, 0x45, 0x4e, 0x44, 0x20, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49,
- 0x43, 0x41, 0x54, 0x45, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a};
-unsigned int test_root_cert_size = 802;
+ 0x43, 0x41, 0x54, 0x45, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x00};
diff --git a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c
index 7718d30b3e..e5cdec8ea7 100644
--- a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c
+++ b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c
@@ -98,8 +98,8 @@ void chttp2_tear_down_secure_fullstack(grpc_end2end_test_fixture *f) {
static void chttp2_init_client_simple_ssl_secure_fullstack(
grpc_end2end_test_fixture *f, grpc_channel_args *client_args) {
- grpc_credentials *ssl_creds = grpc_ssl_credentials_create(
- test_root_cert, test_root_cert_size, NULL, 0, NULL, 0);
+ grpc_credentials *ssl_creds =
+ grpc_ssl_credentials_create(test_root_cert, NULL);
grpc_arg ssl_name_override = {GRPC_ARG_STRING,
GRPC_SSL_TARGET_NAME_OVERRIDE_ARG,
{"foo.test.google.com"}};
@@ -111,9 +111,10 @@ static void chttp2_init_client_simple_ssl_secure_fullstack(
static void chttp2_init_server_simple_ssl_secure_fullstack(
grpc_end2end_test_fixture *f, grpc_channel_args *server_args) {
- grpc_server_credentials *ssl_creds = grpc_ssl_server_credentials_create(
- NULL, 0, test_server1_key, test_server1_key_size, test_server1_cert,
- test_server1_cert_size);
+ grpc_ssl_pem_key_cert_pair pem_cert_key_pair = {test_server1_key,
+ test_server1_cert};
+ grpc_server_credentials *ssl_creds =
+ grpc_ssl_server_credentials_create(NULL, &pem_cert_key_pair, 1);
chttp2_init_server_secure_fullstack(f, server_args, ssl_creds);
}
diff --git a/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c b/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c
index bb8af88c54..8bfa465696 100644
--- a/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c
+++ b/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c
@@ -99,8 +99,7 @@ void chttp2_tear_down_secure_fullstack(grpc_end2end_test_fixture *f) {
static void chttp2_init_client_simple_ssl_with_oauth2_secure_fullstack(
grpc_end2end_test_fixture *f, grpc_channel_args *client_args) {
- grpc_credentials *ssl_creds = grpc_ssl_credentials_create(
- test_root_cert, test_root_cert_size, NULL, 0, NULL, 0);
+ grpc_credentials *ssl_creds = grpc_ssl_credentials_create(test_root_cert, NULL);
grpc_credentials *oauth2_creds =
grpc_fake_oauth2_credentials_create("Bearer aaslkfjs424535asdf", 1);
grpc_credentials *ssl_oauth2_creds =
@@ -118,9 +117,10 @@ static void chttp2_init_client_simple_ssl_with_oauth2_secure_fullstack(
static void chttp2_init_server_simple_ssl_secure_fullstack(
grpc_end2end_test_fixture *f, grpc_channel_args *server_args) {
- grpc_server_credentials *ssl_creds = grpc_ssl_server_credentials_create(
- NULL, 0, test_server1_key, test_server1_key_size, test_server1_cert,
- test_server1_cert_size);
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {test_server1_key,
+ test_server1_cert};
+ grpc_server_credentials *ssl_creds =
+ grpc_ssl_server_credentials_create(NULL, &pem_key_cert_pair, 1);
chttp2_init_server_secure_fullstack(f, server_args, ssl_creds);
}
diff --git a/test/core/fling/server.c b/test/core/fling/server.c
index d68fbe7c3c..f811aac284 100644
--- a/test/core/fling/server.c
+++ b/test/core/fling/server.c
@@ -101,9 +101,10 @@ int main(int argc, char **argv) {
cq = grpc_completion_queue_create();
if (secure) {
- grpc_server_credentials *ssl_creds = grpc_ssl_server_credentials_create(
- NULL, 0, test_server1_key, test_server1_key_size, test_server1_cert,
- test_server1_cert_size);
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {test_server1_key,
+ test_server1_cert};
+ grpc_server_credentials *ssl_creds =
+ grpc_ssl_server_credentials_create(NULL, &pem_key_cert_pair, 1);
server = grpc_secure_server_create(ssl_creds, cq, NULL);
GPR_ASSERT(grpc_server_add_secure_http2_port(server, addr));
grpc_server_credentials_release(ssl_creds);
diff --git a/test/core/iomgr/fd_posix_test.c b/test/core/iomgr/fd_posix_test.c
index 136f34a8b0..05c91ffdd4 100644
--- a/test/core/iomgr/fd_posix_test.c
+++ b/test/core/iomgr/fd_posix_test.c
@@ -37,6 +37,7 @@
#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
+#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -79,7 +80,7 @@ static void create_test_socket(int port, int *socket_fd,
/* Use local address for test */
sin->sin_family = AF_INET;
- sin->sin_addr.s_addr = 0;
+ sin->sin_addr.s_addr = htonl(0x7f000001);
sin->sin_port = htons(port);
}
@@ -164,7 +165,7 @@ static void session_read_cb(void *arg, /*session*/
grpc_fd_notify_on_read(se->em_fd, session_read_cb, se);
} else {
gpr_log(GPR_ERROR, "Unhandled read error %s", strerror(errno));
- GPR_ASSERT(0);
+ abort();
}
}
}
@@ -316,7 +317,7 @@ static void client_session_write(void *arg, /*client*/
gpr_mu_unlock(&cl->mu);
} else {
gpr_log(GPR_ERROR, "unknown errno %s", strerror(errno));
- GPR_ASSERT(0);
+ abort();
}
}
@@ -325,10 +326,20 @@ static void client_start(client *cl, int port) {
int fd;
struct sockaddr_in sin;
create_test_socket(port, &fd, &sin);
- if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) == -1 &&
- errno != EINPROGRESS) {
- gpr_log(GPR_ERROR, "Failed to connect to the server");
- GPR_ASSERT(0);
+ if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
+ if (errno == EINPROGRESS) {
+ struct pollfd pfd;
+ pfd.fd = fd;
+ pfd.events = POLLOUT;
+ pfd.revents = 0;
+ if (poll(&pfd, 1, -1) == -1) {
+ gpr_log(GPR_ERROR, "poll() failed during connect; errno=%d", errno);
+ abort();
+ }
+ } else {
+ gpr_log(GPR_ERROR, "Failed to connect to the server (errno=%d)", errno);
+ abort();
+ }
}
cl->em_fd = grpc_fd_create(fd);
diff --git a/test/core/iomgr/poll_kick_test.c b/test/core/iomgr/poll_kick_test.c
index 9f0d0f38b3..c30a7b9ee0 100644
--- a/test/core/iomgr/poll_kick_test.c
+++ b/test/core/iomgr/poll_kick_test.c
@@ -33,16 +33,17 @@
#include "src/core/iomgr/pollset_kick.h"
+#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "test/core/util/test_config.h"
-static void test_allocation() {
+static void test_allocation(void) {
grpc_pollset_kick_state state;
grpc_pollset_kick_init(&state);
grpc_pollset_kick_destroy(&state);
}
-static void test_non_kick() {
+static void test_non_kick(void) {
grpc_pollset_kick_state state;
int fd;
@@ -54,7 +55,7 @@ static void test_non_kick() {
grpc_pollset_kick_destroy(&state);
}
-static void test_basic_kick() {
+static void test_basic_kick(void) {
/* Kicked during poll */
grpc_pollset_kick_state state;
int fd;
@@ -73,7 +74,7 @@ static void test_basic_kick() {
grpc_pollset_kick_destroy(&state);
}
-static void test_non_poll_kick() {
+static void test_non_poll_kick(void) {
/* Kick before entering poll */
grpc_pollset_kick_state state;
int fd;
@@ -86,6 +87,26 @@ static void test_non_poll_kick() {
grpc_pollset_kick_destroy(&state);
}
+#define GRPC_MAX_CACHED_PIPES 50
+
+static void test_over_free(void) {
+ /* Check high watermark pipe free logic */
+ int i;
+ struct grpc_pollset_kick_state *kick_state =
+ gpr_malloc(sizeof(grpc_pollset_kick_state) * GRPC_MAX_CACHED_PIPES);
+ for (i = 0; i < GRPC_MAX_CACHED_PIPES; ++i) {
+ int fd;
+ grpc_pollset_kick_init(&kick_state[i]);
+ fd = grpc_pollset_kick_pre_poll(&kick_state[i]);
+ GPR_ASSERT(fd >= 0);
+ }
+
+ for (i = 0; i < GRPC_MAX_CACHED_PIPES; ++i) {
+ grpc_pollset_kick_post_poll(&kick_state[i]);
+ grpc_pollset_kick_destroy(&kick_state[i]);
+ }
+}
+
int main(int argc, char **argv) {
grpc_test_init(argc, argv);
@@ -95,6 +116,7 @@ int main(int argc, char **argv) {
test_basic_kick();
test_non_poll_kick();
test_non_kick();
+ test_over_free();
grpc_pollset_kick_global_destroy();
return 0;
diff --git a/test/core/iomgr/resolve_address_test.c b/test/core/iomgr/resolve_address_test.c
index 26a4bc67e6..319ee634d6 100644
--- a/test/core/iomgr/resolve_address_test.c
+++ b/test/core/iomgr/resolve_address_test.c
@@ -32,6 +32,7 @@
*/
#include "src/core/iomgr/resolve_address.h"
+#include "src/core/iomgr/iomgr.h"
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
@@ -122,7 +123,7 @@ static void test_unparseable_hostports(void) {
int main(int argc, char** argv) {
grpc_test_init(argc, argv);
-
+ grpc_iomgr_init();
test_localhost();
test_default_port();
test_missing_default_port();
@@ -130,6 +131,6 @@ int main(int argc, char** argv) {
test_ipv6_without_port();
test_invalid_ip_addresses();
test_unparseable_hostports();
-
+ grpc_iomgr_shutdown();
return 0;
}
diff --git a/test/core/iomgr/sockaddr_utils_test.c b/test/core/iomgr/sockaddr_utils_test.c
index 14018ed66c..3e653da4c9 100644
--- a/test/core/iomgr/sockaddr_utils_test.c
+++ b/test/core/iomgr/sockaddr_utils_test.c
@@ -213,9 +213,9 @@ static void test_sockaddr_to_string(void) {
expect_sockaddr_str("[::fffe:c000:263]:12345", &input6, 1);
memset(&dummy, 0, sizeof(dummy));
- dummy.sa_family = 999;
- expect_sockaddr_str("(sockaddr family=999)", &dummy, 0);
- expect_sockaddr_str("(sockaddr family=999)", &dummy, 1);
+ dummy.sa_family = 123;
+ expect_sockaddr_str("(sockaddr family=123)", &dummy, 0);
+ expect_sockaddr_str("(sockaddr family=123)", &dummy, 1);
GPR_ASSERT(errno == 0xDEADBEEF);
}
diff --git a/test/core/iomgr/tcp_client_posix_test.c b/test/core/iomgr/tcp_client_posix_test.c
index 79ba777e85..f9212e7373 100644
--- a/test/core/iomgr/tcp_client_posix_test.c
+++ b/test/core/iomgr/tcp_client_posix_test.c
@@ -40,6 +40,7 @@
#include <unistd.h>
#include "src/core/iomgr/iomgr.h"
+#include "src/core/iomgr/socket_utils_posix.h"
#include <grpc/support/log.h>
#include <grpc/support/time.h>
@@ -138,7 +139,8 @@ void test_times_out(void) {
/* tie up the listen buffer, which is somewhat arbitrarily sized. */
for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
- client_fd[i] = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
+ client_fd[i] = socket(AF_INET, SOCK_STREAM, 0);
+ grpc_set_socket_nonblocking(client_fd[i], 1);
do {
r = connect(client_fd[i], (struct sockaddr *)&addr, addr_len);
} while (r == -1 && errno == EINTR);
diff --git a/test/core/security/credentials_test.c b/test/core/security/credentials_test.c
index 619f09308e..9c60f4c233 100644
--- a/test/core/security/credentials_test.c
+++ b/test/core/security/credentials_test.c
@@ -48,7 +48,7 @@ static const char test_iam_authorization_token[] = "blahblahblhahb";
static const char test_iam_authority_selector[] = "respectmyauthoritah";
static const char test_oauth2_bearer_token[] =
"Bearer blaaslkdjfaslkdfasdsfasf";
-static const unsigned char test_root_cert[] = {0xDE, 0xAD, 0xBE, 0xEF};
+static const char test_root_cert[] = "I am the root!";
/* This JSON key was generated with the GCE console and revoked immediately.
The identifiers have been changed as well.
@@ -275,8 +275,8 @@ static void check_ssl_oauth2_composite_metadata(
}
static void test_ssl_oauth2_composite_creds(void) {
- grpc_credentials *ssl_creds = grpc_ssl_credentials_create(
- test_root_cert, sizeof(test_root_cert), NULL, 0, NULL, 0);
+ grpc_credentials *ssl_creds =
+ grpc_ssl_credentials_create(test_root_cert, NULL);
const grpc_credentials_array *creds_array;
grpc_credentials *oauth2_creds =
grpc_fake_oauth2_credentials_create(test_oauth2_bearer_token, 0);
@@ -312,8 +312,8 @@ static void check_ssl_oauth2_iam_composite_metadata(
}
static void test_ssl_oauth2_iam_composite_creds(void) {
- grpc_credentials *ssl_creds = grpc_ssl_credentials_create(
- test_root_cert, sizeof(test_root_cert), NULL, 0, NULL, 0);
+ grpc_credentials *ssl_creds =
+ grpc_ssl_credentials_create(test_root_cert, NULL);
const grpc_credentials_array *creds_array;
grpc_credentials *oauth2_creds =
grpc_fake_oauth2_credentials_create(test_oauth2_bearer_token, 0);
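Both composite-credentials tests now build their SSL credentials from a null-terminated PEM string rather than a (buffer, length) pair, matching the new string form of test_root_cert above. A minimal sketch of the updated call as used here, assuming the second argument (NULL in these tests) carries an optional client key/cert pair and that grpc_credentials_release is the matching cleanup call; my_pem_roots is an illustrative placeholder:

    /* Roots are now passed as a plain PEM C string; no sizes are supplied. */
    static const char my_pem_roots[] =
        "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n";
    grpc_credentials *ssl_creds = grpc_ssl_credentials_create(my_pem_roots, NULL);
    GPR_ASSERT(ssl_creds != NULL);
    grpc_credentials_release(ssl_creds);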
diff --git a/test/cpp/client/credentials_test.cc b/test/cpp/client/credentials_test.cc
index ea088b87bd..174d2187b0 100644
--- a/test/cpp/client/credentials_test.cc
+++ b/test/cpp/client/credentials_test.cc
@@ -45,15 +45,6 @@ class CredentialsTest : public ::testing::Test {
protected:
};
-TEST_F(CredentialsTest, InvalidSslCreds) {
- std::unique_ptr<Credentials> bad1 =
- CredentialsFactory::SslCredentials({"", "", ""});
- EXPECT_EQ(nullptr, bad1.get());
- std::unique_ptr<Credentials> bad2 =
- CredentialsFactory::SslCredentials({"", "bla", "bla"});
- EXPECT_EQ(nullptr, bad2.get());
-}
-
TEST_F(CredentialsTest, InvalidServiceAccountCreds) {
std::unique_ptr<Credentials> bad1 =
CredentialsFactory::ServiceAccountCredentials("", "",
diff --git a/test/cpp/interop/client.cc b/test/cpp/interop/client.cc
index 04cfeb86cb..2a18ddb72e 100644
--- a/test/cpp/interop/client.cc
+++ b/test/cpp/interop/client.cc
@@ -188,7 +188,7 @@ void DoResponseStreamingWithSlowConsumer(
grpc::ClientContext context;
StreamingOutputCallRequest request;
- for (unsigned int i = 0; i < kNumResponseMessages; ++i) {
+ for (int i = 0; i < kNumResponseMessages; ++i) {
ResponseParameters* response_parameter = request.add_response_parameters();
response_parameter->set_size(kResponseMessageSize);
}
@@ -196,7 +196,7 @@ void DoResponseStreamingWithSlowConsumer(
std::unique_ptr<grpc::ClientReader<StreamingOutputCallResponse>> stream(
stub->StreamingOutputCall(&context, &request));
- unsigned int i = 0;
+ int i = 0;
while (stream->Read(&response)) {
GPR_ASSERT(response.payload().body() ==
grpc::string(kResponseMessageSize, '\0'));
diff --git a/test/cpp/interop/server.cc b/test/cpp/interop/server.cc
index 561b134c27..5b5c35416c 100644
--- a/test/cpp/interop/server.cc
+++ b/test/cpp/interop/server.cc
@@ -203,11 +203,7 @@ void RunServer() {
builder.RegisterService(service.service());
if (FLAGS_enable_ssl) {
SslServerCredentialsOptions ssl_opts = {
- "",
- {reinterpret_cast<const char*>(test_server1_key),
- test_server1_key_size},
- {reinterpret_cast<const char*>(test_server1_cert),
- test_server1_cert_size}};
+ "", {{test_server1_key, test_server1_cert}}};
std::shared_ptr<ServerCredentials> creds =
ServerCredentialsFactory::SslCredentials(ssl_opts);
builder.SetCredentials(creds);
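On the server side the options initializer changes in the same spirit: the test key and certificate are consumed as plain strings, so the reinterpret_cast and *_size members disappear and the second field takes a list of {private key, certificate} pairs. A sketch of the shape implied by the initializer above (the layout is inferred from this hunk only, not from the header):

    // Empty client-roots string plus a single key/cert pair, both as strings.
    SslServerCredentialsOptions ssl_opts = {
        "", {{test_server1_key, test_server1_cert}}};
    std::shared_ptr<ServerCredentials> creds =
        ServerCredentialsFactory::SslCredentials(ssl_opts);
    builder.SetCredentials(creds);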
diff --git a/test/cpp/util/create_test_channel.cc b/test/cpp/util/create_test_channel.cc
index f0d35d98c2..68f6244a53 100644
--- a/test/cpp/util/create_test_channel.cc
+++ b/test/cpp/util/create_test_channel.cc
@@ -56,11 +56,8 @@ std::shared_ptr<ChannelInterface> CreateTestChannel(
ChannelArguments channel_args;
if (enable_ssl) {
const char* roots_certs =
- use_prod_roots ? reinterpret_cast<const char*>(prod_roots_certs)
- : reinterpret_cast<const char*>(test_root_cert);
- unsigned int roots_certs_size =
- use_prod_roots ? prod_roots_certs_size : test_root_cert_size;
- SslCredentialsOptions ssl_opts = {{roots_certs, roots_certs_size}, "", ""};
+ use_prod_roots ? prod_roots_certs : test_root_cert;
+ SslCredentialsOptions ssl_opts = {roots_certs, "", ""};
std::unique_ptr<Credentials> creds =
CredentialsFactory::SslCredentials(ssl_opts);
diff --git a/tools/dockerfile/grpc_base/Dockerfile b/tools/dockerfile/grpc_base/Dockerfile
index 45be172593..be1b69b0dc 100644
--- a/tools/dockerfile/grpc_base/Dockerfile
+++ b/tools/dockerfile/grpc_base/Dockerfile
@@ -13,6 +13,7 @@ RUN apt-get update && apt-get install -y \
libc6 \
libc6-dbg \
libc6-dev \
+ libgtest-dev \
libtool \
make \
strace \
@@ -34,23 +35,13 @@ ENV CLOUD_SDK /google-cloud-sdk
RUN $CLOUD_SDK/install.sh --usage-reporting=true --path-update=true --bash-completion=true --rc-path=/.bashrc --disable-installation-options
ENV PATH $CLOUD_SDK/bin:$PATH
-# Install gcompute-tools to allow access to private git-on-borg repos
-RUN git clone https://gerrit.googlesource.com/gcompute-tools /var/local/git/gcompute-tools
-
-# Start the daemon that allows access to private git-on-borg repos
-RUN /var/local/git/gcompute-tools/git-cookie-authdaemon
-
-# Install the grpc-tools scripts dir from git
-RUN git clone https://team.googlesource.com/one-platform-grpc-team/grpc-tools /var/local/git/grpc-tools
-
-# Install the grpc-protobuf dir that has the protoc patch
-RUN git clone https://team.googlesource.com/one-platform-grpc-team/protobuf /var/local/git/protobuf
-
-# Install the patched version of protoc
-RUN cd /var/local/git/protobuf && \
- ./autogen.sh && \
- ./configure --prefix=/usr && \
- make && make check && make install && make clean
+# Install a GitHub SSH service credential that gives access to the GitHub repo while it's private
+# TODO: remove this once the repo is public
+ADD .ssh .ssh
+RUN chmod 600 .ssh/github.rsa
+RUN mkdir -p $HOME/.ssh && echo 'Host github.com' > $HOME/.ssh/config
+RUN echo " IdentityFile /.ssh/github.rsa" >> $HOME/.ssh/config
+RUN echo 'StrictHostKeyChecking no' >> $HOME/.ssh/config
# Define the default command.
CMD ["bash"]
diff --git a/tools/dockerfile/grpc_cxx/Dockerfile b/tools/dockerfile/grpc_cxx/Dockerfile
index cf38e976b1..ea3a1dba8f 100644
--- a/tools/dockerfile/grpc_cxx/Dockerfile
+++ b/tools/dockerfile/grpc_cxx/Dockerfile
@@ -1,15 +1,18 @@
# Dockerfile for gRPC C++
FROM grpc/base
-# Start the daemon that allows access to the protected git-on-borg repos
-RUN /var/local/git/gcompute-tools/git-cookie-authdaemon
-
-RUN git clone https://team.googlesource.com/one-platform-grpc-team/grpc /var/local/git/grpc
-RUN cd /var/local/git/grpc \
- && git pull --recurse-submodules \
- && git submodule update --init --recursive
+# Get the source from GitHub
+RUN git clone git@github.com:google/grpc.git /var/local/git/grpc
+RUN cd /var/local/git/grpc && \
+ git pull --recurse-submodules && \
+ git submodule update --init --recursive
+# Build the protobuf library; then the C core.
+RUN cd /var/local/git/grpc/third_party/protobuf && \
+ ./autogen.sh && \
+ ./configure --prefix=/usr && \
+ make -j12 && make check && make install && make clean
RUN make install -C /var/local/git/grpc
# Define the default command.
-CMD ["bash"] \ No newline at end of file
+CMD ["bash"]
diff --git a/tools/dockerfile/grpc_go/Dockerfile b/tools/dockerfile/grpc_go/Dockerfile
new file mode 100644
index 0000000000..ab463b2a00
--- /dev/null
+++ b/tools/dockerfile/grpc_go/Dockerfile
@@ -0,0 +1,27 @@
+# Dockerfile for gRPC Go
+FROM golang:1.4
+
+# Install SSH so that the Go source can be pulled securely.
+RUN apt-get update && apt-get install -y ssh
+
+# Install a GitHub SSH service credential that gives access to the GitHub repo while it's private
+#
+# TODO: remove this once the repo is public
+ADD .ssh .ssh
+RUN chmod 600 /.ssh/github.rsa
+RUN mkdir -p $HOME/.ssh && echo 'Host github.com' > $HOME/.ssh/config
+RUN echo " IdentityFile /.ssh/github.rsa" >> $HOME/.ssh/config
+RUN echo 'StrictHostKeyChecking no' >> $HOME/.ssh/config
+
+# Force go get to use the GitHub ssh url instead of https, and use the SSH creds
+RUN git config --global url."git@github.com:".insteadOf "https://github.com/"
+
+# Get the source from GitHub
+RUN go get github.com/google/grpc-go
+
+# Build the interop client and server
+RUN cd src/github.com/google/grpc-go/interop/client && go install
+RUN cd src/github.com/google/grpc-go/interop/server && go install
+
+# Specify the default command such that the interop server runs on its known testing port
+CMD ["/bin/bash", "-c 'cd src/github.com/google/grpc-go/interop/server && go run server.go --use_tls=true --port=8020'"]
diff --git a/tools/dockerfile/grpc_go/README.md b/tools/dockerfile/grpc_go/README.md
new file mode 100644
index 0000000000..0d6ad3e391
--- /dev/null
+++ b/tools/dockerfile/grpc_go/README.md
@@ -0,0 +1,4 @@
+GRPC Go Dockerfile
+==================
+
+Dockerfile for gRPC Go development, testing and deployment.
diff --git a/tools/dockerfile/grpc_php/Dockerfile b/tools/dockerfile/grpc_php/Dockerfile
index 0e50af70a2..177587669c 100644
--- a/tools/dockerfile/grpc_php/Dockerfile
+++ b/tools/dockerfile/grpc_php/Dockerfile
@@ -1,9 +1,6 @@
# Dockerfile for gRPC PHP
FROM grpc/php_base
-# Start the daemon that allows access to the protected git-on-borg repos
-RUN /var/local/git/gcompute-tools/git-cookie-authdaemon
-
RUN cd /var/local/git/grpc \
&& git pull --recurse-submodules \
&& git submodule update --init --recursive
@@ -15,4 +12,4 @@ RUN cd /var/local/git/grpc/src/php/ext/grpc && git pull && phpize
# Build the grpc PHP extension
RUN cd /var/local/git/grpc/src/php/ext/grpc \
&& ./configure \
- && make
\ No newline at end of file
+ && make
diff --git a/tools/dockerfile/grpc_php_base/Dockerfile b/tools/dockerfile/grpc_php_base/Dockerfile
index 8ec90f48b8..47266a310e 100644
--- a/tools/dockerfile/grpc_php_base/Dockerfile
+++ b/tools/dockerfile/grpc_php_base/Dockerfile
@@ -43,9 +43,10 @@ RUN cd /var/local \
&& tar -xf php-5.5.17.tar.gz \
&& cd php-5.5.17 \
&& ./configure --with-zlib=/usr --with-libxml-dir=ext/libxml \
- && make && make install
+ && make -j12 && make install
# Start the daemon that allows access to the protected git-on-borg repos
+RUN git clone https://gerrit.googlesource.com/gcompute-tools /var/local/git/gcompute-tools
RUN /var/local/git/gcompute-tools/git-cookie-authdaemon
# Download the patched PHP protobuf so that PHP gRPC clients can be generated
@@ -64,6 +65,18 @@ ENV PATH /usr/local/rvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/s
# rake: a ruby version of make used to build the PHP Protobuf extension
RUN rvm all do gem install ronn rake
+# Get the source from GitHub; this brings in the protobuf library as well
+RUN git clone git@github.com:google/grpc.git /var/local/git/grpc
+RUN cd /var/local/git/grpc && \
+ git pull --recurse-submodules && \
+ git submodule update --init --recursive
+
+# Build and install the protobuf library
+RUN cd /var/local/git/grpc/third_party/protobuf && \
+ ./autogen.sh && \
+ ./configure --prefix=/usr && \
+ make -j12 && make check && make install && make clean
+
# Install the patched PHP protobuf so that PHP gRPC clients can be generated
# from proto3 schemas.
RUN cd /var/local/git/protobuf-php \
@@ -75,10 +88,7 @@ RUN wget https://phar.phpunit.de/phpunit.phar \
&& chmod +x phpunit.phar \
&& mv phpunit.phar /usr/local/bin/phpunit
-RUN git clone https://team.googlesource.com/one-platform-grpc-team/grpc /var/local/git/grpc
-RUN cd /var/local/git/grpc \
- && git submodule update --init --recursive
-
+# Build the C core
RUN make static_c shared_c -j12 -C /var/local/git/grpc
# Define the default command.
diff --git a/tools/dockerfile/grpc_ruby/Dockerfile b/tools/dockerfile/grpc_ruby/Dockerfile
index 9aa34bfcc9..43ec0183ca 100644
--- a/tools/dockerfile/grpc_ruby/Dockerfile
+++ b/tools/dockerfile/grpc_ruby/Dockerfile
@@ -1,19 +1,14 @@
# Dockerfile for gRPC Ruby
FROM grpc/ruby_base
-# Start the daemon that allows access to the protected git-on-borg repos
-RUN /var/local/git/gcompute-tools/git-cookie-authdaemon
-
+# Pull the latest gRPC source and update its submodules
RUN cd /var/local/git/grpc \
&& git pull --recurse-submodules \
&& git submodule update --init --recursive
+# Build the C core.
RUN make install_c -C /var/local/git/grpc
-# Install the grpc gem locally with its dependencies and build the extension.
-RUN /bin/bash -l -c 'cd /var/local/git/beefcake && bundle && gem build beefcake.gemspec && gem install beefcake'
-RUN /bin/bash -l -c 'cd /var/local/git/grpc/src/ruby && bundle && rake compile:grpc && gem build grpc.gemspec && gem install grpc'
-
# TODO add a command to run the unittest tests when the bug below is fixed
# - the tests fail due to an error in the C threading library:
# they fail with 'ruby: __pthread_mutex_cond_lock_adjust for unknown reasons' at the end of a testcase
diff --git a/tools/dockerfile/grpc_ruby_base/Dockerfile b/tools/dockerfile/grpc_ruby_base/Dockerfile
index ad14e43ec7..b2af9d7160 100644
--- a/tools/dockerfile/grpc_ruby_base/Dockerfile
+++ b/tools/dockerfile/grpc_ruby_base/Dockerfile
@@ -31,15 +31,6 @@ RUN apt-get update && apt-get install -y \
sqlite3 \
zlib1g-dev
-
-# Start the daemon that allows access to the protected git-on-borg repos
-RUN /var/local/git/gcompute-tools/git-cookie-authdaemon
-
-# Download the patched Ruby protobuf (beefcake) so that Ruby gRPC clients can
-# be generated from proto3 schemas.
-RUN git clone https://team.googlesource.com/one-platform-grpc-team/grpc-ruby-beefcake \
- /var/local/git/beefcake
-
# Install RVM, use this to install ruby
RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys D39DC0E3 # Needed for RVM
RUN /bin/bash -l -c "curl -L get.rvm.io | bash -s stable"
@@ -52,8 +43,17 @@ RUN /bin/bash -l -c "echo 'source /home/grpc_ruby/.rvm/scripts/rvm' >> ~/.bashrc
RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1' >> ~/.bashrc"
RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"
-RUN git clone https://team.googlesource.com/one-platform-grpc-team/grpc /var/local/git/grpc
-RUN cd /var/local/git/grpc \
- && git submodule update --init --recursive
+# Get the source from GitHub
+RUN git clone git@github.com:google/grpc.git /var/local/git/grpc
+RUN cd /var/local/git/grpc && \
+ git pull --recurse-submodules && \
+ git submodule update --init --recursive
+
+# Build and install the protobuf library
+RUN cd /var/local/git/grpc/third_party/protobuf && \
+ ./autogen.sh && \
+ ./configure --prefix=/usr && \
+ make -j12 && make check && make install && make clean
-RUN make static_c shared_c -C /var/local/git/grpc
\ No newline at end of file
+# Build the C core
+RUN make static_c shared_c -j12 -C /var/local/git/grpc
diff --git a/tools/gce_setup/grpc_docker.sh b/tools/gce_setup/grpc_docker.sh
index bf776126b5..bfa4c808be 100755
--- a/tools/gce_setup/grpc_docker.sh
+++ b/tools/gce_setup/grpc_docker.sh
@@ -86,6 +86,7 @@ grpc_add_docker_user() {
}
_grpc_update_image_args() {
+ echo "image_args $@"
# default the host, root storage uri and docker file root
grpc_gs_root='gs://tmp-grpc-dev/admin/'
grpc_dockerfile_root='tools/dockerfile'
@@ -95,7 +96,7 @@ _grpc_update_image_args() {
# see if -p or -z is used to override the project or zone
local OPTIND
local OPTARG
- while getopts :r:d:h name
+ while getopts :r:d:h: name
do
case $name in
d) grpc_dockerfile_root=$OPTARG ;;
@@ -261,7 +262,7 @@ _grpc_set_project_and_zone() {
local OPTIND
local OPTARG
local arg_func
- while getopts :p:z:f:n name
+ while getopts :np:z:f: name
do
case $name in
f) declare -F $OPTARG >> /dev/null && {
@@ -392,6 +393,65 @@ grpc_interop_test_args() {
}
}
+_grpc_sync_scripts_args() {
+ grpc_gce_script_root='tools/gce_setup'
+
+ local OPTIND
+ local OPTARG
+ while getopts :s: name
+ do
+ case $name in
+ s) grpc_gce_script_root=$OPTARG ;;
+ :) continue ;; # ignore -s without args, just use the defaults
+ \?) echo "-$OPTARG: unknown flag; it's ignored" 1>&2; continue ;;
+ esac
+ done
+ shift $((OPTIND-1))
+
+ [[ -d $grpc_gce_script_root ]] || {
+ echo "Could not locate gce script dir: $grpc_gce_script_root" 1>&2
+ return 1
+ }
+
+ [[ $# -lt 1 ]] && {
+ echo "$FUNCNAME: missing arg: host1 [host2 ... hostN]" 1>&2
+ return 1
+ }
+ grpc_hosts="$@"
+}
+
+# Updates the latest version of the support scripts on some hosts.
+#
+# call-seq:
+#   grpc_sync_scripts <server_name1> <server_name2> ... <server_nameN>
+#
+# Updates the support scripts on each named GCE docker instance.
+grpc_sync_scripts() {
+ _grpc_ensure_gcloud_ssh || return 1;
+
+ # declare vars local so that they don't pollute the shell environment
+ # where this func is used.
+ local grpc_zone grpc_project dry_run # set by _grpc_set_project_and_zone
+ local grpc_hosts grpc_gce_script_root
+
+ # set the project zone and check that all necessary args are provided
+ _grpc_set_project_and_zone -f _grpc_sync_scripts_args "$@" || return 1
+
+ local func_lib="shared_startup_funcs.sh"
+ local gce_func_lib="/var/local/startup_scripts/$func_lib"
+ local project_opt="--project $grpc_project"
+ local zone_opt="--zone $grpc_zone"
+ local host
+ for host in $grpc_hosts
+ do
+ gce_has_instance $grpc_project $host || return 1;
+ # Update the remote copy of the GCE func library.
+ local src_func_lib="$grpc_gce_script_root/$func_lib"
+ local rmt_func_lib="$host:$gce_func_lib"
+ gcloud compute copy-files $src_func_lib $rmt_func_lib $project_opt $zone_opt || return 1
+ done
+}
+
grpc_sync_images_args() {
[[ $# -lt 1 ]] && {
echo "$FUNCNAME: missing arg: host1 [host2 ... hostN]" 1>&2
@@ -412,7 +472,6 @@ grpc_sync_images() {
# declare vars local so that they don't pollute the shell environment
# where they this func is used.
local grpc_zone grpc_project dry_run # set by _grpc_set_project_and_zone
- # set by grpc_sync_images
local grpc_hosts
# set the project zone and check that all necessary args are provided
@@ -425,7 +484,7 @@ grpc_sync_images() {
local host
for host in $grpc_hosts
do
- gce_has_instance $grpc_project $h || return 1;
+ gce_has_instance $grpc_project $host || return 1;
local ssh_cmd="bash -l -c \"$cmd\""
echo "will run:"
echo " $ssh_cmd"
@@ -575,6 +634,18 @@ grpc_interop_gen_ruby_cmd() {
echo $the_cmd
}
+# constructs the full dockerized Go interop test cmd.
+#
+# call-seq:
+# flags= ....  # generic flags to include in the command
+# cmd=$($grpc_gen_test_cmd $flags)
+grpc_interop_gen_go_cmd() {
+ local cmd_prefix="sudo docker run grpc/go bin/bash -c";
+ local test_script="cd /go/src/github.com/google/grpc-go/interop/client";
+ test_script+=" && go run client.go --use_tls=true";
+ local the_cmd="$cmd_prefix '$test_script $@ 1>&2'";
+ echo $the_cmd
+}
+
# constructs the full dockerized java interop test cmd.
#
# call-seq:
@@ -605,4 +676,4 @@ grpc_interop_gen_php_cmd() {
}
-# TODO(grpc-team): add grpc_interop_gen_xxx_cmd for python|cxx|nodejs|go
+# TODO(grpc-team): add grpc_interop_gen_xxx_cmd for python|cxx|nodejs
diff --git a/tools/gce_setup/shared_startup_funcs.sh b/tools/gce_setup/shared_startup_funcs.sh
index 9ea6eca461..f1dbca9a2e 100755
--- a/tools/gce_setup/shared_startup_funcs.sh
+++ b/tools/gce_setup/shared_startup_funcs.sh
@@ -367,11 +367,12 @@ grpc_docker_launch_registry() {
grpc_docker_pull_known() {
local addr=$1
[[ -n $addr ]] || addr="0.0.0.0:5000"
- local known="base cxx php_base php ruby_base ruby java_base java"
+ local known="base cxx php_base php ruby_base ruby java_base java go"
echo "... pulling docker images for '$known'"
for i in $known
do
- sudo docker pull ${addr}/grpc/$i \
+ echo "<--- grpc/$i"
+ sudo docker pull ${addr}/grpc/$i > /dev/null 2>&1 \
&& sudo docker tag ${addr}/grpc/$i grpc/$i || {
# log and continue
echo "docker op error: could not pull ${addr}/grpc/$i"
@@ -388,7 +389,7 @@ grpc_docker_pull_known() {
# grpc_dockerfile_install "grpc/image" /var/local/dockerfile/grpc_image
grpc_dockerfile_install() {
local image_label=$1
- [[ -n $image_label ]] || { echo "missing arg: image_label" >&2; return 1; }
+ [[ -n $image_label ]] || { echo "$FUNCNAME: missing arg: image_label" >&2; return 1; }
local docker_img_url=0.0.0.0:5000/$image_label
local dockerfile_dir=$2
@@ -400,19 +401,30 @@ grpc_dockerfile_install() {
[[ $cache == "cache=1" ]] && { cache_opt=''; }
[[ $cache == "cache=true" ]] && { cache_opt=''; }
- [[ -d $dockerfile_dir ]] || { echo "not a valid dir: $dockerfile_dir"; return 1; }
+ [[ -d $dockerfile_dir ]] || { echo "$FUNCNAME: not a valid dir: $dockerfile_dir"; return 1; }
+
+ # For specific base images, sync the ssh key into the .ssh dir in the dockerfile context
+ [[ $image_label == "grpc/base" ]] && {
+ grpc_docker_sync_github_key $dockerfile_dir/.ssh 'base_ssh_key'|| return 1;
+ }
+ [[ $image_label == "grpc/go" ]] && {
+ grpc_docker_sync_github_key $dockerfile_dir/.ssh 'go_ssh_key'|| return 1;
+ }
+ [[ $image_label == "grpc/java_base" ]] && {
+ grpc_docker_sync_github_key $dockerfile_dir/.ssh 'java_base_ssh_key'|| return 1;
+ }
# TODO(temiola): maybe make cache/no-cache a func option?
sudo docker build $cache_opt -t $image_label $dockerfile_dir || {
- echo "docker op error: build of $image_label <- $dockerfile_dir"
+ echo "$FUNCNAME:: build of $image_label <- $dockerfile_dir"
return 1
}
sudo docker tag $image_label $docker_img_url || {
- echo "docker op error: tag of $docker_img_url"
+ echo "$FUNCNAME: failed to tag $docker_img_url as $image_label"
return 1
}
sudo docker push $docker_img_url || {
- echo "docker op error: push of $docker_img_url"
+ echo "$FUNCNAME: failed to push $docker_img_url"
return 1
}
}
@@ -428,3 +440,34 @@ grpc_dockerfile_install() {
grpc_dockerfile_refresh() {
grpc_dockerfile_install "$@"
}
+
+# grpc_docker_sync_github_key.
+#
+# Copies the docker github key from GCS to the target dir
+#
+# call-seq:
+# grpc_docker_sync_github_key <target_dir>
+grpc_docker_sync_github_key() {
+ local target_dir=$1
+ [[ -n $target_dir ]] || { echo "$FUNCNAME: missing arg: target_dir" >&2; return 1; }
+
+ local key_file=$2
+ [[ -n $key_file ]] || { echo "$FUNCNAME: missing arg: key_file" >&2; return 1; }
+
+ # determine the admin root; the parent of the dockerfile root,
+ local gs_dockerfile_root=$(load_metadata "attributes/gs_dockerfile_root")
+ [[ -n $gs_dockerfile_root ]] || {
+ echo "$FUNCNAME: missing metadata: gs_dockerfile_root" >&2
+ return 1
+ }
+ local gcs_admin_root=$(dirname $gs_dockerfile_root)
+
+ # cp the file from gsutil to a known local area
+ local gcs_key_path=$gcs_admin_root/github/$key_file
+ local local_key_path=$target_dir/github.rsa
+ mkdir -p $target_dir || {
+ echo "$FUNCNAME: could not create dir: $target_dir" 1>&2
+ return 1
+ }
+ gsutil cp $gcs_key_path $local_key_path
+}
diff --git a/tools/run_tests/build_python.sh b/tools/run_tests/build_python.sh
new file mode 100755
index 0000000000..6899ac7fe3
--- /dev/null
+++ b/tools/run_tests/build_python.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+# change to grpc repo root
+cd $(dirname $0)/../..
+
+root=`pwd`
+virtualenv python2.7_virtual_environment
+python2.7_virtual_environment/bin/pip install enum34==1.0.4 futures==2.2.0
diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py
index 7a6d979ba3..8f16a4ff2c 100755
--- a/tools/run_tests/jobset.py
+++ b/tools/run_tests/jobset.py
@@ -160,8 +160,8 @@ class Jobset(object):
self._completed += 1
self._running.remove(job)
if dead: return
- message('WAITING', '%d jobs running, %d complete' % (
- len(self._running), self._completed))
+ message('WAITING', '%d jobs running, %d complete, %d failed' % (
+ len(self._running), self._completed, self._failures))
time.sleep(0.1)
def cancelled(self):
diff --git a/tools/run_tests/run_python.sh b/tools/run_tests/run_python.sh
new file mode 100755
index 0000000000..0d5ed0238d
--- /dev/null
+++ b/tools/run_tests/run_python.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+# change to grpc repo root
+cd $(dirname $0)/../..
+
+root=`pwd`
+python2.7_virtual_environment/bin/python2.7 -B -m unittest discover -s src/python -p '*.py'
+python3.4 -B -m unittest discover -s src/python -p '*.py'
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 15c523731b..da849f04cb 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -75,6 +75,21 @@ class PhpLanguage(object):
return [['tools/run_tests/build_php.sh']]
+class PythonLanguage(object):
+
+  def __init__(self):
+    self.allow_hashing = False
+
+  def test_binaries(self, config):
+    return ['tools/run_tests/run_python.sh']
+
+  def make_targets(self):
+    return []
+
+  def build_steps(self):
+    return [['tools/run_tests/build_python.sh']]
+
+
# different configurations we can run under
_CONFIGS = {
'dbg': SimpleConfig('dbg'),
@@ -92,7 +107,8 @@ _DEFAULT = ['dbg', 'opt']
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
'c': CLanguage('c', 'c'),
- 'php': PhpLanguage()
+ 'php': PhpLanguage(),
+ 'python': PythonLanguage(),
}
# parse command line
diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json
index ec838649e0..90571eaec6 100644
--- a/tools/run_tests/tests.json
+++ b/tools/run_tests/tests.json
@@ -263,6 +263,10 @@
},
{
"language": "c++",
+ "name": "tips_client_test"
+ },
+ {
+ "language": "c++",
"name": "status_test"
},
{