author     yang-g <yangg@google.com>    2016-07-12 08:33:39 -0700
committer  yang-g <yangg@google.com>    2016-07-12 08:33:39 -0700
commit     408ef6bf25bac1e1d45f39af4e031ac16d1699b6 (patch)
tree       d07e130c7cacd511230ede6a61b8bc85171c1a82
parent     9241c6947fa924b6d961d2e90e1a4f7293ec727e (diff)
parent     2f17797c943b2b58865713b1ffd18e586d3fed02 (diff)
Merge remote-tracking branch 'upstream/master' into sequential_con
199 files changed, 4895 insertions, 6517 deletions
diff --git a/.gitmodules b/.gitmodules index c85a53943a..ce647f3c45 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,7 +4,7 @@ [submodule "third_party/protobuf"] path = third_party/protobuf url = https://github.com/google/protobuf.git - branch = v3.0.0-beta-2 + branch = 3.0.0-beta-3 [submodule "third_party/gflags"] path = third_party/gflags url = https://github.com/gflags/gflags.git diff --git a/.travis.yml b/.travis.yml index fcdfd8b2bf..7576e076a0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -39,9 +39,6 @@ before_install: - gem install cocoapods -v '1.0.0' - pod --version - brew install gflags - - pushd third_party/protobuf - - git checkout v3.0.0-beta-3 - - popd install: - make grpc_objective_c_plugin - install bins/opt/grpc_objective_c_plugin /usr/local/bin/protoc-gen-objcgrpc @@ -1603,6 +1603,7 @@ cc_library( "//external:protobuf_clib", ":gpr", ":grpc_unsecure", + ":grpc", ], ) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9caf03191f..2c0059cd2d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -42,7 +42,7 @@ cmake_minimum_required(VERSION 2.8) set(PACKAGE_NAME "grpc") -set(PACKAGE_VERSION "0.16.0-dev") +set(PACKAGE_VERSION "1.1.0-dev") set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}") set(PACKAGE_TARNAME "${PACKAGE_NAME}-${PACKAGE_VERSION}") set(PACKAGE_BUGREPORT "https://github.com/grpc/grpc/issues/") @@ -58,6 +58,13 @@ if(NOT ZLIB_ROOT_DIR) set(ZLIB_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib) endif() +# Building the protobuf tests require gmock what is not part of a standard protobuf checkout. +# Disable them unless they are explicitly requested from the cmake command line (when we assume +# gmock is downloaded to the right location inside protobuf). +if(NOT protobuf_BUILD_TESTS) + set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests") +endif() + add_subdirectory(${BORINGSSL_ROOT_DIR} third_party/boringssl) add_subdirectory(${PROTOBUF_ROOT_DIR}/cmake third_party/protobuf) add_subdirectory(${ZLIB_ROOT_DIR} third_party/zlib) @@ -802,6 +809,7 @@ target_link_libraries(grpc++_unsecure libprotobuf gpr grpc_unsecure + grpc ) @@ -415,7 +415,7 @@ E = @echo Q = @ endif -VERSION = 0.16.0-dev +VERSION = 1.1.0-dev CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES)) CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS) @@ -448,7 +448,7 @@ PC_TEMPLATE = prefix=$(prefix),exec_prefix=\$${prefix},includedir=\$${prefix}/in ifeq ($(SYSTEM),MINGW32) SHARED_EXT = dll SHARED_PREFIX = -SHARED_VERSION = -0 +SHARED_VERSION = -1 else ifeq ($(SYSTEM),Darwin) SHARED_EXT = dylib SHARED_PREFIX = lib @@ -2164,7 +2164,7 @@ install-shared_c: shared_c strip-shared_c install-pkg-config_c ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgpr-imp.a $(prefix)/lib/libgpr-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgpr.so.0 + $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgpr.so.1 $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgpr.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT)" @@ -2173,7 +2173,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc-imp.a $(prefix)/lib/libgrpc-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc.so.0 + $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc.so.1 $(Q) ln -sf 
$(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT)" @@ -2182,7 +2182,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_cronet-imp.a $(prefix)/lib/libgrpc_cronet-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_cronet.so.0 + $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_cronet.so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_cronet.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT)" @@ -2191,7 +2191,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure-imp.a $(prefix)/lib/libgrpc_unsecure-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_unsecure.so.0 + $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_unsecure.so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_unsecure.so endif ifneq ($(SYSTEM),MINGW32) @@ -2208,7 +2208,7 @@ install-shared_cxx: shared_cxx strip-shared_cxx install-shared_c install-pkg-con ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++-imp.a $(prefix)/lib/libgrpc++-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++.so.0 + $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++.so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT)" @@ -2217,7 +2217,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection-imp.a $(prefix)/lib/libgrpc++_reflection-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_reflection.so.0 + $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_reflection.so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_reflection.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT)" @@ -2226,7 +2226,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure-imp.a $(prefix)/lib/libgrpc++_unsecure-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_unsecure.so.0 + $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_unsecure.so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_unsecure.so endif ifneq ($(SYSTEM),MINGW32) @@ -2243,7 +2243,7 @@ install-shared_csharp: shared_csharp strip-shared_csharp ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext-imp.a $(prefix)/lib/libgrpc_csharp_ext-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_csharp_ext.so.0 + $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) 
$(prefix)/lib/libgrpc_csharp_ext.so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_csharp_ext.so endif ifneq ($(SYSTEM),MINGW32) @@ -2436,8 +2436,8 @@ $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT): $(LIBGPR_OBJS) $(ZLI ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) else - $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgpr.so.0 -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) - $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).so.0 + $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgpr.so.1 -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) + $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).so endif endif @@ -2729,8 +2729,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC_OBJS) $(Z ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) else - $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) - $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).so.0 + $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) + $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).so endif endif @@ -2978,8 +2978,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC_CRO ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) else - $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_cronet.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) - $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).so.0 + $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_cronet.so.1 -o 
$(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) + $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).so endif endif @@ -3299,8 +3299,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC_U ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) else - $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_unsecure.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) - $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).so.0 + $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_unsecure.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) + $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).so endif endif @@ -3569,8 +3569,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc else - $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc - $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).so.0 + $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc + $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).so endif endif @@ -3696,8 +3696,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT): $(LIBGR ifeq ($(SYSTEM),Darwin) $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++ else - $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_reflection.so.0 -o 
$(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++ - $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).so.0 + $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_reflection.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++ + $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).so endif endif @@ -4035,19 +4035,19 @@ endif ifeq ($(SYSTEM),MINGW32) -$(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_UNSECURE_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/gpr.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/grpc_unsecure.$(SHARED_EXT) +$(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_UNSECURE_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/gpr.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/grpc_unsecure.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/grpc.$(SHARED_EXT) $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared grpc++_unsecure.def -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr-imp -lgrpc_unsecure-imp + $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared grpc++_unsecure.def -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr-imp -lgrpc_unsecure-imp -lgrpc-imp else -$(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_UNSECURE_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/libgpr.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.$(SHARED_EXT) +$(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_UNSECURE_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/libgpr.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc.$(SHARED_EXT) $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` ifeq ($(SYSTEM),Darwin) - $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure + $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure -lgrpc else - $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared 
-Wl,-soname,libgrpc++_unsecure.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure - $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).so.0 + $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_unsecure.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure -lgrpc + $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).so endif endif @@ -4476,8 +4476,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) else - $(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_csharp_ext.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) - $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).so.0 + $(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_csharp_ext.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) + $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).so endif endif diff --git a/PYTHON-MANIFEST.in b/PYTHON-MANIFEST.in index 635e77b875..175a47f157 100644 --- a/PYTHON-MANIFEST.in +++ b/PYTHON-MANIFEST.in @@ -7,6 +7,7 @@ graft include/grpc graft third_party/boringssl graft third_party/nanopb graft third_party/zlib +include src/python/grpcio/build.py include src/python/grpcio/commands.py include src/python/grpcio/grpc_version.py include src/python/grpcio/grpc_core_dependencies.py diff --git a/build.yaml b/build.yaml index 1c485fd5c9..071d4a8e1e 100644 --- a/build.yaml +++ b/build.yaml @@ -7,7 +7,7 @@ settings: '#3': Use "-preN" suffixes to identify pre-release versions '#4': Per-language overrides are possible with (eg) ruby_version tag here '#5': See the expand_version.py for all the quirks here - version: 0.16.0-dev + version: 1.1.0-dev filegroups: - name: census public_headers: @@ -712,6 +712,8 @@ filegroups: - src/cpp/util/status.cc - src/cpp/util/string_ref.cc - src/cpp/util/time.cc + deps: + - grpc uses: - grpc++_codegen_base - name: grpc++_codegen_base 
diff --git a/composer.json b/composer.json index 05ac003714..6e7f24b451 100644 --- a/composer.json +++ b/composer.json @@ -13,8 +13,10 @@ ], "require": { "php": ">=5.5.0", - "datto/protobuf-php": "dev-master", - "google/auth": "v0.7" + "datto/protobuf-php": "dev-master" + }, + "require-dev": { + "google/auth": "v0.9" }, "autoload": { "psr-4": { diff --git a/doc/compression_cookbook.md b/doc/compression_cookbook.md new file mode 100644 index 0000000000..c10a805f20 --- /dev/null +++ b/doc/compression_cookbook.md @@ -0,0 +1,133 @@ +# gRPC (Core) Compression Cookbook + +## Introduction + +This document describes compression as implemented by the gRPC C core. See [the +full compression specification](compression.md) for details. + +### Intended Audience + +Wrapped languages developers, for the purposes of supporting compression by +interacting with the C core. + +## Criteria for GA readiness + +1. Be able to set compression at [channel](#per-channel-settings), + [call](#per-call-settings) and [message](#per-message-settings) level. + In principle this API should be based on _compression levels_ as opposed to + algorithms. See the discussion [below](#level-vs-algorithms). +1. Have unit tests covering [the cases from the + spec](https://github.com/grpc/grpc/blob/master/doc/compression.md#test-cases). +1. Interop tests implemented and passing on Jenkins. The two relevant interop + test cases are + [large_compressed_unary](https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#large_compressed_unary) + and + [server_compressed_streaming](https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#server_compressed_streaming). + +## Summary Flowcharts + +The following flowcharts depict the evolution of a message, both _incoming_ and +_outgoing_, irrespective of the client/server character of the call. Aspects +still not symmetric between clients and servers (e.g. the [use of compression +levels](https://github.com/grpc/grpc/blob/master/doc/compression.md#compression-levels-and-algorithms)) +are explicitly marked. The in-detail textual description for the different +scenarios is described in subsequent sections. + +## Incoming Messages + +![image](images/compression_cookbook_incoming.png) + +## Outgoing Messages + +![image](images/compression_cookbook_outgoing.png) + +## Levels vs Algorithms + +As mentioned in [the relevant discussion on the spec +document](https://github.com/grpc/grpc/blob/master/doc/compression.md#compression-levels-and-algorithms), +compression _levels_ are the primary mechanism for compression selection _at the +server side_. In the future, it'll also be at the client side. The use of levels +abstracts away the intricacies of selecting a concrete algorithm supported by a +peer, on top of removing the burden of choice from the developer. +As of this writing (Q2 2016), clients can only specify compression _algorithms_. +Clients will support levels as soon as an automatic retry/negotiation mechanism +is in place. + +## Per Channel Settings + +Compression may be configured at channel creation. This is a convenience to +avoid having to repeatedly configure compression for every call. Note that any +compression setting on individual [calls](#per-call-settings) or +[messages](#per-message-settings) overrides channel settings. 
+ +The following aspects can be configured at channel-creation time via channel arguments: + +#### Disable Compression _Algorithms_ + +Use the channel argument key +`GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET` (from +[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h)), +takes a 32 bit bitset value. A set bit means the algorithm with that enum value +according to `grpc_compression_algorithm` is _enabled_. +For example, `GRPC_COMPRESS_GZIP` currently has a numeric value of 2. To +enable/disable GZIP for a channel, one would set/clear the 3rd LSB (eg, 0b100 = +0x4). Note that setting/clearing 0th position, that corresponding to +`GRPC_COMPRESS_NONE`, has no effect, as no-compression (a.k.a. _identity_) is +always supported. +Incoming messages compressed (ie, encoded) with a disabled algorithm will result +in the call being closed with `GRPC_STATUS_UNIMPLEMENTED`. + +#### Default Compression _Level_ + +**(currently, Q2 2016, only applicable for server side channels. It's ignored +for clients.)** +Use the channel argument key `GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL` (from +[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h)), +valued by an integer corresponding to a value from the `grpc_compression_level` +enum. + +#### Default Compression _Algorithm_ + +Use the channel argument key `GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM` (from +[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h)), +valued by an integer corresponding to a value from the `grpc_compression_level` +enum. + +## Per Call Settings + +### Compression **Level** in Call Responses + +The server requests a compression level via initial metadata. The +`send_initial_metadata` `grpc_op` contains a `maybe_compression_level` field +with two fields, `is_set` and `compression_level`. The former must be set when +actively choosing a level to disambiguate the default value of zero (no +compression) from the proactive selection of no compression. + +The core will receive the request for the compression level and automatically +choose a compression algorithm based on its knowledge about the peer +(communicated by the client via the `grpc-accept-encoding` header. Note that the +absence of this header means no compression is supported by the client/peer). + +### Compression **Algorithm** in Call Responses + +**Server should avoid setting the compression algorithm directly**. Prefer +setting compression levels unless there's a _very_ compelling reason to choose +specific algorithms (benchmarking, testing). + +Selection of concrete compression algorithms is performed by adding a +`(GRPC_COMPRESS_REQUEST_ALGORITHM_KEY, <algorithm-name>)` key-value pair to the +initial metadata, where `GRPC_COMPRESS_REQUEST_ALGORITHM_KEY` is defined in +[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h)), +and `<algorithm-name>` is the human readable name of the algorithm as given in +[the HTTP2 spec](https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md) +for `Message-Encoding` (e.g. gzip, identity, etc.). See +[`grpc_compression_algorithm_name`](https://github.com/grpc/grpc/blob/master/src/core/lib/compression/compression.c) +for the mapping between the `grpc_compression_algorithm` enum values and their +textual representation. 
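As an illustrative aside (not part of the commit above): the channel- and call-level knobs described in the new compression cookbook can be exercised from the C core roughly as sketched below. This is a minimal, untested sketch assuming the 2016-era core API (`grpc_arg`/`grpc_channel_args`, `grpc_call_start_batch`); the channel-argument keys and enum values are the ones named in the cookbook text, while the exact member names of `maybe_compression_level` should be verified against `grpc/impl/codegen/grpc_types.h` in your checkout.

```c
/* Sketch only: compression configuration against the gRPC C core, following
 * the cookbook text above. Verify struct member names against your headers. */
#include <string.h>

#include <grpc/compression.h>
#include <grpc/grpc.h>

/* Per-channel settings, passed as channel args to grpc_server_create() or
 * grpc_insecure_channel_create(). */
static grpc_channel_args compression_channel_args(grpc_arg *storage) {
  /* Enable only identity (bit 0, always supported) and gzip (bit 2, since
   * GRPC_COMPRESS_GZIP currently has the numeric value 2). */
  storage[0].type = GRPC_ARG_INTEGER;
  storage[0].key = GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
  storage[0].value.integer =
      (1 << GRPC_COMPRESS_NONE) | (1 << GRPC_COMPRESS_GZIP);

  /* Default compression level; per the cookbook this is only honored on
   * server-side channels as of Q2 2016. */
  storage[1].type = GRPC_ARG_INTEGER;
  storage[1].key = GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL;
  storage[1].value.integer = GRPC_COMPRESS_LEVEL_HIGH;

  grpc_channel_args args = {2, storage};
  return args;
}

/* Per-call setting on the server: request a compression level for the
 * call's responses via the send_initial_metadata op. */
static void request_response_compression(grpc_call *call, void *tag) {
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_INITIAL_METADATA;
  op.data.send_initial_metadata.count = 0;
  /* The cookbook refers to the second member as "compression_level"; in
   * grpc_types.h of this era it appears as "level". Adjust to your tree. */
  op.data.send_initial_metadata.maybe_compression_level.is_set = 1;
  op.data.send_initial_metadata.maybe_compression_level.level =
      GRPC_COMPRESS_LEVEL_HIGH;
  /* A later GRPC_OP_SEND_MESSAGE op in the same call can opt out of
   * compression by setting GRPC_WRITE_NO_COMPRESS in its flags field. */
  grpc_call_error err = grpc_call_start_batch(call, &op, 1, tag, NULL);
  (void)err; /* Check against GRPC_CALL_OK in real code. */
}
```

The core then picks a concrete algorithm for the requested level from what the peer advertised in `grpc-accept-encoding`, which is why levels rather than algorithms are the recommended surface for servers.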
+ +## Per Message Settings + +To disable compression for a specific message, the `flags` field of `grpc_op` +instances of type `GRPC_OP_SEND_MESSAGE` must have its `GRPC_WRITE_NO_COMPRESS` +bit set. Refer to +[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h)), diff --git a/doc/images/compression_cookbook_incoming.png b/doc/images/compression_cookbook_incoming.png Binary files differnew file mode 100644 index 0000000000..84d6f558b6 --- /dev/null +++ b/doc/images/compression_cookbook_incoming.png diff --git a/doc/images/compression_cookbook_outgoing.png b/doc/images/compression_cookbook_outgoing.png Binary files differnew file mode 100644 index 0000000000..59b995d154 --- /dev/null +++ b/doc/images/compression_cookbook_outgoing.png diff --git a/examples/cpp/helloworld/Makefile b/examples/cpp/helloworld/Makefile index 780e5e427a..b45b3c7ee0 100644 --- a/examples/cpp/helloworld/Makefile +++ b/examples/cpp/helloworld/Makefile @@ -97,7 +97,7 @@ ifneq ($(HAS_VALID_PROTOC),true) @echo "Please install Google protocol buffers 3.0.0 and its compiler." @echo "You can find it here:" @echo - @echo " https://github.com/google/protobuf/releases/tag/v3.0.0-beta-2" + @echo " https://github.com/google/protobuf/releases/tag/v3.0.0-beta-3.3" @echo @echo "Here is what I get when trying to evaluate your version of protoc:" @echo diff --git a/examples/cpp/route_guide/Makefile b/examples/cpp/route_guide/Makefile index 11f2a00cc8..50ecf041f5 100644 --- a/examples/cpp/route_guide/Makefile +++ b/examples/cpp/route_guide/Makefile @@ -86,7 +86,7 @@ ifneq ($(HAS_VALID_PROTOC),true) @echo "Please install Google protocol buffers 3.0.0 and its compiler." @echo "You can find it here:" @echo - @echo " https://github.com/google/protobuf/releases/tag/v3.0.0-beta-2" + @echo " https://github.com/google/protobuf/releases/tag/v3.0.0-beta-3.3" @echo @echo "Here is what I get when trying to evaluate your version of protoc:" @echo diff --git a/examples/csharp/helloworld/README.md b/examples/csharp/helloworld/README.md index 63131ed98c..d13c9ac9db 100644 --- a/examples/csharp/helloworld/README.md +++ b/examples/csharp/helloworld/README.md @@ -5,23 +5,16 @@ BACKGROUND ------------- For this sample, we've already generated the server and client stubs from [helloworld.proto][]. -Example projects depend on the [Grpc](https://www.nuget.org/packages/Grpc/) +Example projects depend on the [Grpc](https://www.nuget.org/packages/Grpc/), [Grpc.Tools](https://www.nuget.org/packages/Grpc.Tools/) and [Google.Protobuf](https://www.nuget.org/packages/Google.Protobuf/) NuGet packages which have been already added to the project for you. 
PREREQUISITES ------------- -**Windows** -- .NET 4.5+ -- Visual Studio 2013 or 2015 -**Linux** -- Mono 4.0+ -- Monodevelop 5.9+ (with NuGet plugin installed) - -**Mac OS X** -- Xamarin Studio 5.9+ -- [homebrew][] +- Windows: .NET Framework 4.5+, Visual Studio 2013 or 2015 +- Linux: Mono 4+, MonoDevelop 5.9+ (with NuGet add-in installed) +- Mac OS X: Xamarin Studio 5.9+ BUILD ------- @@ -56,6 +49,5 @@ Tutorial You can find a more detailed tutorial in [gRPC Basics: C#][] -[homebrew]:http://brew.sh [helloworld.proto]:../../protos/helloworld.proto [gRPC Basics: C#]:http://www.grpc.io/docs/tutorials/basic/csharp.html diff --git a/examples/objective-c/auth_sample/Podfile b/examples/objective-c/auth_sample/Podfile index 7affe08743..32157a9dce 100644 --- a/examples/objective-c/auth_sample/Podfile +++ b/examples/objective-c/auth_sample/Podfile @@ -1,9 +1,10 @@ source 'https://github.com/CocoaPods/Specs.git' platform :ios, '8.0' -pod 'Protobuf', :path => "../../../third_party/protobuf" -pod 'BoringSSL', :podspec => "../../../src/objective-c" -pod 'gRPC', :path => "../../.." +install! 'cocoapods', :deterministic_uuids => false + +# Location of gRPC's repo root relative to this file. +GRPC_LOCAL_SRC = '../../..' target 'AuthSample' do # Depend on the generated AuthTestService library. @@ -11,4 +12,35 @@ target 'AuthSample' do # Depend on Google's OAuth2 library pod 'Google/SignIn' + + # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following + # lines in your application. + pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf" + + pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" + + pod 'gRPC', :path => GRPC_LOCAL_SRC + pod 'gRPC-Core', :path => GRPC_LOCAL_SRC + pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC + pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC +end + +# This pre_install hook is only needed to use the local version of gRPC-Core. You don't need it in +# your application. +pre_install do |installer| + # This is the gRPC-Core podspec object, as initialized by its podspec file. + grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec + + # Copied from gRPC-Core.podspec, except for the adjusted src_root: + src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}" + grpc_core_spec.pod_target_xcconfig = { + 'GRPC_SRC_ROOT' => src_root, + 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"', + 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"', + # If we don't set these two settings, `include/grpc/support/time.h` and + # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the + # build. + 'USE_HEADERMAP' => 'NO', + 'ALWAYS_SEARCH_USER_PATHS' => 'NO', + } end diff --git a/examples/objective-c/helloworld/Podfile b/examples/objective-c/helloworld/Podfile index eebf05470d..e1bb4ddfd5 100644 --- a/examples/objective-c/helloworld/Podfile +++ b/examples/objective-c/helloworld/Podfile @@ -1,11 +1,43 @@ source 'https://github.com/CocoaPods/Specs.git' platform :ios, '8.0' -pod 'Protobuf', :path => "../../../third_party/protobuf" -pod 'BoringSSL', :podspec => "../../../src/objective-c" -pod 'gRPC', :path => "../../.." +install! 'cocoapods', :deterministic_uuids => false + +# Location of gRPC's repo root relative to this file. +GRPC_LOCAL_SRC = '../../..' target 'HelloWorld' do # Depend on the generated HelloWorld library. pod 'HelloWorld', :path => '.' + + # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following + # lines in your application. 
+ pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf" + + pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" + + pod 'gRPC', :path => GRPC_LOCAL_SRC + pod 'gRPC-Core', :path => GRPC_LOCAL_SRC + pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC + pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC +end + +# This pre_install hook is only needed to use the local version of gRPC-Core. You don't need it in +# your application. +pre_install do |installer| + # This is the gRPC-Core podspec object, as initialized by its podspec file. + grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec + + # Copied from gRPC-Core.podspec, except for the adjusted src_root: + src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}" + grpc_core_spec.pod_target_xcconfig = { + 'GRPC_SRC_ROOT' => src_root, + 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"', + 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"', + # If we don't set these two settings, `include/grpc/support/time.h` and + # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the + # build. + 'USE_HEADERMAP' => 'NO', + 'ALWAYS_SEARCH_USER_PATHS' => 'NO', + } end diff --git a/examples/objective-c/route_guide/Podfile b/examples/objective-c/route_guide/Podfile index b9f2fefd6d..943f5464d8 100644 --- a/examples/objective-c/route_guide/Podfile +++ b/examples/objective-c/route_guide/Podfile @@ -1,10 +1,43 @@ source 'https://github.com/CocoaPods/Specs.git' platform :ios, '8.0' +install! 'cocoapods', :deterministic_uuids => false + +# Location of gRPC's repo root relative to this file. +GRPC_LOCAL_SRC = '../../..' + target 'RouteGuideClient' do - pod 'Protobuf', :path => "../../../third_party/protobuf" - pod 'BoringSSL', :podspec => "../../../src/objective-c" - pod 'gRPC', :path => "../../.." # Depend on the generated RouteGuide library. pod 'RouteGuide', :path => '.' + + # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following + # lines in your application. + pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf" + + pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" + + pod 'gRPC', :path => GRPC_LOCAL_SRC + pod 'gRPC-Core', :path => GRPC_LOCAL_SRC + pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC + pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC +end + +# This pre_install hook is only needed to use the local version of gRPC-Core. You don't need it in +# your application. +pre_install do |installer| + # This is the gRPC-Core podspec object, as initialized by its podspec file. + grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec + + # Copied from gRPC-Core.podspec, except for the adjusted src_root: + src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}" + grpc_core_spec.pod_target_xcconfig = { + 'GRPC_SRC_ROOT' => src_root, + 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"', + 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"', + # If we don't set these two settings, `include/grpc/support/time.h` and + # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the + # build. + 'USE_HEADERMAP' => 'NO', + 'ALWAYS_SEARCH_USER_PATHS' => 'NO', + } end diff --git a/examples/php/README.md b/examples/php/README.md index e56b017873..6889a6cb7e 100644 --- a/examples/php/README.md +++ b/examples/php/README.md @@ -37,7 +37,8 @@ TRY IT! 
``` $ cd examples/node $ npm install - $ nodejs greeter_server.js + $ cd dynamic_codegen or cd static_codegen + $ node greeter_server.js ``` - Run the client diff --git a/examples/php/composer.json b/examples/php/composer.json index c837bf7ac0..950e11367d 100644 --- a/examples/php/composer.json +++ b/examples/php/composer.json @@ -9,6 +9,6 @@ } ], "require": { - "grpc/grpc": "dev-release-0_13" + "grpc/grpc": "v0.15.0" } } diff --git a/examples/python/helloworld/run_codegen.sh b/examples/python/helloworld/run_codegen.sh index 42b58e5021..34224e5c41 100755 --- a/examples/python/helloworld/run_codegen.sh +++ b/examples/python/helloworld/run_codegen.sh @@ -29,4 +29,4 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Runs the protoc with gRPC plugin to generate protocol messages and gRPC stubs. -protoc -I ../../protos --python_out=. --grpc_out=. --plugin=protoc-gen-grpc=`which grpc_python_plugin` ../../protos/helloworld.proto +python -m grpc.tools.protoc -I../../protos --python_out=. --grpc_python_out=. ../../protos/helloworld.proto diff --git a/examples/python/route_guide/run_codegen.sh b/examples/python/route_guide/run_codegen.sh index d9d56c2d7a..a377a1ab40 100755 --- a/examples/python/route_guide/run_codegen.sh +++ b/examples/python/route_guide/run_codegen.sh @@ -29,4 +29,4 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Runs the protoc with gRPC plugin to generate protocol messages and gRPC stubs. -protoc -I ../../protos --python_out=. --grpc_out=. --plugin=protoc-gen-grpc=`which grpc_python_plugin` ../../protos/route_guide.proto +python -m grpc.tools.protoc -I../../protos --python_out=. --grpc_python_out=. ../../protos/route_guide.proto diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec new file mode 100644 index 0000000000..e10e05387b --- /dev/null +++ b/gRPC-Core.podspec @@ -0,0 +1,762 @@ +# GRPC CocoaPods podspec +# This file has been automatically generated from a template file. Please make modifications to +# `templates/gRPC-Core.podspec.template` instead. This file can be regenerated from the template by +# running `tools/buildgen/generate_projects.sh`. + +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Pod::Spec.new do |s| + s.name = 'gRPC-Core' + version = '0.14.0' + s.version = version + s.summary = 'Core cross-platform gRPC library, written in C' + s.homepage = 'http://www.grpc.io' + s.license = 'New BSD' + s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' } + + s.source = { + :git => 'https://github.com/grpc/grpc.git', + :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}", + # TODO(jcanizales): Depend explicitly on the nanopb pod, and disable submodules. + :submodules => true, + } + + s.ios.deployment_target = '7.1' + s.osx.deployment_target = '10.9' + s.requires_arc = false + + name = 'grpc' + + # When creating a dynamic framework, name it grpc.framework instead of gRPC-Core.framework. + # This lets users write their includes like `#include <grpc/grpc.h>` as opposed to `#include + # <gRPC-Core/grpc.h>`. + s.module_name = name + + # When creating a dynamic framework, copy the headers under `include/grpc/` into the root of + # the `Headers/` directory of the framework (i.e., not under `Headers/include/grpc`). + # + # TODO(jcanizales): Debug why this doesn't work on macOS. + s.header_mappings_dir = 'include/grpc' + + # The above has an undesired effect when creating a static library: It forces users to write + # includes like `#include <gRPC-Core/grpc.h>`. `s.header_dir` adds a path prefix to that, and + # because Cocoapods lets omit the pod name when including headers of static libraries, the + # following lets users write `#include <grpc/grpc.h>`. + s.header_dir = name + + # The module map created automatically by Cocoapods doesn't work for C libraries like gRPC-Core. + s.module_map = 'include/grpc/module.modulemap' + + # To compile the library, we need the user headers search path (quoted includes) to point to the + # root of the repo, and the system headers search path (angled includes) to point to `include/`. + # Cocoapods effectively clones the repo under `<Podfile dir>/Pods/gRPC-Core/`, and sets a build + # variable called `$(PODS_ROOT)` to `<Podfile dir>/Pods/`, so we use that. + # + # Relying on the file structure under $(PODS_ROOT) isn't officially supported in Cocoapods, as it + # is taken as an implementation detail. We've asked for an alternative, and have been told that + # what we're doing should keep working: https://github.com/CocoaPods/CocoaPods/issues/4386 + # + # The `src_root` value of `$(PODS_ROOT)/gRPC-Core` assumes Cocoapods is installing this pod from + # its remote repo. For local development of this library, enabled by using `:path` in the Podfile, + # that assumption is wrong. In such case, the following settings need to be reset with the + # appropriate value of `src_root`. This can be accomplished in the `pre_install` hook of the + # Podfile; see `src/objective-c/tests/Podfile` for an example. 
+ src_root = '$(PODS_ROOT)/gRPC-Core' + s.pod_target_xcconfig = { + 'GRPC_SRC_ROOT' => src_root, + 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"', + 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"', + # If we don't set these two settings, `include/grpc/support/time.h` and + # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the + # build. + 'USE_HEADERMAP' => 'NO', + 'ALWAYS_SEARCH_USER_PATHS' => 'NO', + } + + # Like many other C libraries, gRPC-Core has its public headers under `include/<libname>/` and its + # sources and private headers in other directories outside `include/`. Cocoapods' linter doesn't + # allow any header to be listed outside the `header_mappings_dir` (even though doing so works in + # practice). Because we need our `header_mappings_dir` to be `include/grpc/` for the reason + # mentioned above, we work around the linter limitation by dividing the pod into two subspecs, one + # for public headers and the other for implementation. Each gets its own `header_mappings_dir`, + # making the linter happy. + # + # The list of source files is generated by a template: `templates/gRPC-Core.podspec.template`. It + # can be regenerated from the template by running `tools/buildgen/generate_projects.sh`. + s.subspec 'Interface' do |ss| + ss.header_mappings_dir = 'include/grpc' + + ss.source_files = 'include/grpc/support/alloc.h', + 'include/grpc/support/atm.h', + 'include/grpc/support/atm_gcc_atomic.h', + 'include/grpc/support/atm_gcc_sync.h', + 'include/grpc/support/atm_windows.h', + 'include/grpc/support/avl.h', + 'include/grpc/support/cmdline.h', + 'include/grpc/support/cpu.h', + 'include/grpc/support/histogram.h', + 'include/grpc/support/host_port.h', + 'include/grpc/support/log.h', + 'include/grpc/support/log_windows.h', + 'include/grpc/support/port_platform.h', + 'include/grpc/support/slice.h', + 'include/grpc/support/slice_buffer.h', + 'include/grpc/support/string_util.h', + 'include/grpc/support/subprocess.h', + 'include/grpc/support/sync.h', + 'include/grpc/support/sync_generic.h', + 'include/grpc/support/sync_posix.h', + 'include/grpc/support/sync_windows.h', + 'include/grpc/support/thd.h', + 'include/grpc/support/time.h', + 'include/grpc/support/tls.h', + 'include/grpc/support/tls_gcc.h', + 'include/grpc/support/tls_msvc.h', + 'include/grpc/support/tls_pthread.h', + 'include/grpc/support/useful.h', + 'include/grpc/impl/codegen/alloc.h', + 'include/grpc/impl/codegen/atm.h', + 'include/grpc/impl/codegen/atm_gcc_atomic.h', + 'include/grpc/impl/codegen/atm_gcc_sync.h', + 'include/grpc/impl/codegen/atm_windows.h', + 'include/grpc/impl/codegen/log.h', + 'include/grpc/impl/codegen/port_platform.h', + 'include/grpc/impl/codegen/slice.h', + 'include/grpc/impl/codegen/slice_buffer.h', + 'include/grpc/impl/codegen/sync.h', + 'include/grpc/impl/codegen/sync_generic.h', + 'include/grpc/impl/codegen/sync_posix.h', + 'include/grpc/impl/codegen/sync_windows.h', + 'include/grpc/impl/codegen/time.h', + 'include/grpc/byte_buffer.h', + 'include/grpc/byte_buffer_reader.h', + 'include/grpc/compression.h', + 'include/grpc/grpc.h', + 'include/grpc/grpc_posix.h', + 'include/grpc/status.h', + 'include/grpc/impl/codegen/byte_buffer.h', + 'include/grpc/impl/codegen/byte_buffer_reader.h', + 'include/grpc/impl/codegen/compression_types.h', + 'include/grpc/impl/codegen/connectivity_state.h', + 'include/grpc/impl/codegen/grpc_types.h', + 'include/grpc/impl/codegen/propagation_bits.h', + 'include/grpc/impl/codegen/status.h', + 
'include/grpc/impl/codegen/alloc.h', + 'include/grpc/impl/codegen/atm.h', + 'include/grpc/impl/codegen/atm_gcc_atomic.h', + 'include/grpc/impl/codegen/atm_gcc_sync.h', + 'include/grpc/impl/codegen/atm_windows.h', + 'include/grpc/impl/codegen/log.h', + 'include/grpc/impl/codegen/port_platform.h', + 'include/grpc/impl/codegen/slice.h', + 'include/grpc/impl/codegen/slice_buffer.h', + 'include/grpc/impl/codegen/sync.h', + 'include/grpc/impl/codegen/sync_generic.h', + 'include/grpc/impl/codegen/sync_posix.h', + 'include/grpc/impl/codegen/sync_windows.h', + 'include/grpc/impl/codegen/time.h', + 'include/grpc/grpc_security.h', + 'include/grpc/grpc_security_constants.h', + 'include/grpc/census.h' + end + s.subspec 'Implementation' do |ss| + ss.header_mappings_dir = '.' + ss.libraries = 'z' + ss.dependency "#{s.name}/Interface", version + ss.dependency 'BoringSSL', '~> 4.0' + + # To save you from scrolling, this is the last part of the podspec. + ss.source_files = 'src/core/lib/profiling/timers.h', + 'src/core/lib/support/backoff.h', + 'src/core/lib/support/block_annotate.h', + 'src/core/lib/support/env.h', + 'src/core/lib/support/murmur_hash.h', + 'src/core/lib/support/stack_lockfree.h', + 'src/core/lib/support/string.h', + 'src/core/lib/support/string_windows.h', + 'src/core/lib/support/thd_internal.h', + 'src/core/lib/support/time_precise.h', + 'src/core/lib/support/tmpfile.h', + 'src/core/lib/profiling/basic_timers.c', + 'src/core/lib/profiling/stap_timers.c', + 'src/core/lib/support/alloc.c', + 'src/core/lib/support/avl.c', + 'src/core/lib/support/backoff.c', + 'src/core/lib/support/cmdline.c', + 'src/core/lib/support/cpu_iphone.c', + 'src/core/lib/support/cpu_linux.c', + 'src/core/lib/support/cpu_posix.c', + 'src/core/lib/support/cpu_windows.c', + 'src/core/lib/support/env_linux.c', + 'src/core/lib/support/env_posix.c', + 'src/core/lib/support/env_windows.c', + 'src/core/lib/support/histogram.c', + 'src/core/lib/support/host_port.c', + 'src/core/lib/support/log.c', + 'src/core/lib/support/log_android.c', + 'src/core/lib/support/log_linux.c', + 'src/core/lib/support/log_posix.c', + 'src/core/lib/support/log_windows.c', + 'src/core/lib/support/murmur_hash.c', + 'src/core/lib/support/slice.c', + 'src/core/lib/support/slice_buffer.c', + 'src/core/lib/support/stack_lockfree.c', + 'src/core/lib/support/string.c', + 'src/core/lib/support/string_posix.c', + 'src/core/lib/support/string_util_windows.c', + 'src/core/lib/support/string_windows.c', + 'src/core/lib/support/subprocess_posix.c', + 'src/core/lib/support/subprocess_windows.c', + 'src/core/lib/support/sync.c', + 'src/core/lib/support/sync_posix.c', + 'src/core/lib/support/sync_windows.c', + 'src/core/lib/support/thd.c', + 'src/core/lib/support/thd_posix.c', + 'src/core/lib/support/thd_windows.c', + 'src/core/lib/support/time.c', + 'src/core/lib/support/time_posix.c', + 'src/core/lib/support/time_precise.c', + 'src/core/lib/support/time_windows.c', + 'src/core/lib/support/tls_pthread.c', + 'src/core/lib/support/tmpfile_msys.c', + 'src/core/lib/support/tmpfile_posix.c', + 'src/core/lib/support/tmpfile_windows.c', + 'src/core/lib/support/wrap_memcpy.c', + 'src/core/lib/channel/channel_args.h', + 'src/core/lib/channel/channel_stack.h', + 'src/core/lib/channel/channel_stack_builder.h', + 'src/core/lib/channel/compress_filter.h', + 'src/core/lib/channel/connected_channel.h', + 'src/core/lib/channel/context.h', + 'src/core/lib/channel/http_client_filter.h', + 'src/core/lib/channel/http_server_filter.h', + 
'src/core/lib/compression/algorithm_metadata.h', + 'src/core/lib/compression/message_compress.h', + 'src/core/lib/debug/trace.h', + 'src/core/lib/http/format_request.h', + 'src/core/lib/http/httpcli.h', + 'src/core/lib/http/parser.h', + 'src/core/lib/iomgr/closure.h', + 'src/core/lib/iomgr/endpoint.h', + 'src/core/lib/iomgr/endpoint_pair.h', + 'src/core/lib/iomgr/error.h', + 'src/core/lib/iomgr/ev_epoll_linux.h', + 'src/core/lib/iomgr/ev_poll_and_epoll_posix.h', + 'src/core/lib/iomgr/ev_poll_posix.h', + 'src/core/lib/iomgr/ev_posix.h', + 'src/core/lib/iomgr/exec_ctx.h', + 'src/core/lib/iomgr/executor.h', + 'src/core/lib/iomgr/iocp_windows.h', + 'src/core/lib/iomgr/iomgr.h', + 'src/core/lib/iomgr/iomgr_internal.h', + 'src/core/lib/iomgr/iomgr_posix.h', + 'src/core/lib/iomgr/load_file.h', + 'src/core/lib/iomgr/network_status_tracker.h', + 'src/core/lib/iomgr/polling_entity.h', + 'src/core/lib/iomgr/pollset.h', + 'src/core/lib/iomgr/pollset_set.h', + 'src/core/lib/iomgr/pollset_set_windows.h', + 'src/core/lib/iomgr/pollset_windows.h', + 'src/core/lib/iomgr/resolve_address.h', + 'src/core/lib/iomgr/sockaddr.h', + 'src/core/lib/iomgr/sockaddr_posix.h', + 'src/core/lib/iomgr/sockaddr_utils.h', + 'src/core/lib/iomgr/sockaddr_windows.h', + 'src/core/lib/iomgr/socket_utils_posix.h', + 'src/core/lib/iomgr/socket_windows.h', + 'src/core/lib/iomgr/tcp_client.h', + 'src/core/lib/iomgr/tcp_posix.h', + 'src/core/lib/iomgr/tcp_server.h', + 'src/core/lib/iomgr/tcp_windows.h', + 'src/core/lib/iomgr/time_averaged_stats.h', + 'src/core/lib/iomgr/timer.h', + 'src/core/lib/iomgr/timer_heap.h', + 'src/core/lib/iomgr/udp_server.h', + 'src/core/lib/iomgr/unix_sockets_posix.h', + 'src/core/lib/iomgr/wakeup_fd_pipe.h', + 'src/core/lib/iomgr/wakeup_fd_posix.h', + 'src/core/lib/iomgr/workqueue.h', + 'src/core/lib/iomgr/workqueue_posix.h', + 'src/core/lib/iomgr/workqueue_windows.h', + 'src/core/lib/json/json.h', + 'src/core/lib/json/json_common.h', + 'src/core/lib/json/json_reader.h', + 'src/core/lib/json/json_writer.h', + 'src/core/lib/surface/api_trace.h', + 'src/core/lib/surface/call.h', + 'src/core/lib/surface/call_test_only.h', + 'src/core/lib/surface/channel.h', + 'src/core/lib/surface/channel_init.h', + 'src/core/lib/surface/channel_stack_type.h', + 'src/core/lib/surface/completion_queue.h', + 'src/core/lib/surface/event_string.h', + 'src/core/lib/surface/init.h', + 'src/core/lib/surface/lame_client.h', + 'src/core/lib/surface/server.h', + 'src/core/lib/transport/byte_stream.h', + 'src/core/lib/transport/connectivity_state.h', + 'src/core/lib/transport/metadata.h', + 'src/core/lib/transport/metadata_batch.h', + 'src/core/lib/transport/static_metadata.h', + 'src/core/lib/transport/transport.h', + 'src/core/lib/transport/transport_impl.h', + 'src/core/ext/transport/chttp2/transport/bin_decoder.h', + 'src/core/ext/transport/chttp2/transport/bin_encoder.h', + 'src/core/ext/transport/chttp2/transport/chttp2_transport.h', + 'src/core/ext/transport/chttp2/transport/frame.h', + 'src/core/ext/transport/chttp2/transport/frame_data.h', + 'src/core/ext/transport/chttp2/transport/frame_goaway.h', + 'src/core/ext/transport/chttp2/transport/frame_ping.h', + 'src/core/ext/transport/chttp2/transport/frame_rst_stream.h', + 'src/core/ext/transport/chttp2/transport/frame_settings.h', + 'src/core/ext/transport/chttp2/transport/frame_window_update.h', + 'src/core/ext/transport/chttp2/transport/hpack_encoder.h', + 'src/core/ext/transport/chttp2/transport/hpack_parser.h', + 'src/core/ext/transport/chttp2/transport/hpack_table.h', + 
'src/core/ext/transport/chttp2/transport/http2_errors.h', + 'src/core/ext/transport/chttp2/transport/huffsyms.h', + 'src/core/ext/transport/chttp2/transport/incoming_metadata.h', + 'src/core/ext/transport/chttp2/transport/internal.h', + 'src/core/ext/transport/chttp2/transport/status_conversion.h', + 'src/core/ext/transport/chttp2/transport/stream_map.h', + 'src/core/ext/transport/chttp2/transport/timeout_encoding.h', + 'src/core/ext/transport/chttp2/transport/varint.h', + 'src/core/ext/transport/chttp2/alpn/alpn.h', + 'src/core/lib/security/context/security_context.h', + 'src/core/lib/security/credentials/composite/composite_credentials.h', + 'src/core/lib/security/credentials/credentials.h', + 'src/core/lib/security/credentials/fake/fake_credentials.h', + 'src/core/lib/security/credentials/google_default/google_default_credentials.h', + 'src/core/lib/security/credentials/iam/iam_credentials.h', + 'src/core/lib/security/credentials/jwt/json_token.h', + 'src/core/lib/security/credentials/jwt/jwt_credentials.h', + 'src/core/lib/security/credentials/jwt/jwt_verifier.h', + 'src/core/lib/security/credentials/oauth2/oauth2_credentials.h', + 'src/core/lib/security/credentials/plugin/plugin_credentials.h', + 'src/core/lib/security/credentials/ssl/ssl_credentials.h', + 'src/core/lib/security/transport/auth_filters.h', + 'src/core/lib/security/transport/handshake.h', + 'src/core/lib/security/transport/secure_endpoint.h', + 'src/core/lib/security/transport/security_connector.h', + 'src/core/lib/security/transport/tsi_error.h', + 'src/core/lib/security/util/b64.h', + 'src/core/lib/security/util/json_util.h', + 'src/core/lib/tsi/fake_transport_security.h', + 'src/core/lib/tsi/ssl_transport_security.h', + 'src/core/lib/tsi/ssl_types.h', + 'src/core/lib/tsi/transport_security.h', + 'src/core/lib/tsi/transport_security_interface.h', + 'src/core/ext/client_config/client_channel.h', + 'src/core/ext/client_config/client_channel_factory.h', + 'src/core/ext/client_config/client_config.h', + 'src/core/ext/client_config/connector.h', + 'src/core/ext/client_config/initial_connect_string.h', + 'src/core/ext/client_config/lb_policy.h', + 'src/core/ext/client_config/lb_policy_factory.h', + 'src/core/ext/client_config/lb_policy_registry.h', + 'src/core/ext/client_config/parse_address.h', + 'src/core/ext/client_config/resolver.h', + 'src/core/ext/client_config/resolver_factory.h', + 'src/core/ext/client_config/resolver_registry.h', + 'src/core/ext/client_config/subchannel.h', + 'src/core/ext/client_config/subchannel_call_holder.h', + 'src/core/ext/client_config/subchannel_index.h', + 'src/core/ext/client_config/uri_parser.h', + 'src/core/ext/lb_policy/grpclb/load_balancer_api.h', + 'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h', + 'third_party/nanopb/pb.h', + 'third_party/nanopb/pb_common.h', + 'third_party/nanopb/pb_decode.h', + 'third_party/nanopb/pb_encode.h', + 'src/core/ext/load_reporting/load_reporting.h', + 'src/core/ext/load_reporting/load_reporting_filter.h', + 'src/core/ext/census/aggregation.h', + 'src/core/ext/census/census_interface.h', + 'src/core/ext/census/census_rpc_stats.h', + 'src/core/ext/census/gen/census.pb.h', + 'src/core/ext/census/grpc_filter.h', + 'src/core/ext/census/mlog.h', + 'src/core/ext/census/rpc_metric_id.h', + 'src/core/lib/surface/init.c', + 'src/core/lib/channel/channel_args.c', + 'src/core/lib/channel/channel_stack.c', + 'src/core/lib/channel/channel_stack_builder.c', + 'src/core/lib/channel/compress_filter.c', + 'src/core/lib/channel/connected_channel.c', + 
'src/core/lib/channel/http_client_filter.c', + 'src/core/lib/channel/http_server_filter.c', + 'src/core/lib/compression/compression.c', + 'src/core/lib/compression/message_compress.c', + 'src/core/lib/debug/trace.c', + 'src/core/lib/http/format_request.c', + 'src/core/lib/http/httpcli.c', + 'src/core/lib/http/parser.c', + 'src/core/lib/iomgr/closure.c', + 'src/core/lib/iomgr/endpoint.c', + 'src/core/lib/iomgr/endpoint_pair_posix.c', + 'src/core/lib/iomgr/endpoint_pair_windows.c', + 'src/core/lib/iomgr/error.c', + 'src/core/lib/iomgr/ev_epoll_linux.c', + 'src/core/lib/iomgr/ev_poll_and_epoll_posix.c', + 'src/core/lib/iomgr/ev_poll_posix.c', + 'src/core/lib/iomgr/ev_posix.c', + 'src/core/lib/iomgr/exec_ctx.c', + 'src/core/lib/iomgr/executor.c', + 'src/core/lib/iomgr/iocp_windows.c', + 'src/core/lib/iomgr/iomgr.c', + 'src/core/lib/iomgr/iomgr_posix.c', + 'src/core/lib/iomgr/iomgr_windows.c', + 'src/core/lib/iomgr/load_file.c', + 'src/core/lib/iomgr/network_status_tracker.c', + 'src/core/lib/iomgr/polling_entity.c', + 'src/core/lib/iomgr/pollset_set_windows.c', + 'src/core/lib/iomgr/pollset_windows.c', + 'src/core/lib/iomgr/resolve_address_posix.c', + 'src/core/lib/iomgr/resolve_address_windows.c', + 'src/core/lib/iomgr/sockaddr_utils.c', + 'src/core/lib/iomgr/socket_utils_common_posix.c', + 'src/core/lib/iomgr/socket_utils_linux.c', + 'src/core/lib/iomgr/socket_utils_posix.c', + 'src/core/lib/iomgr/socket_windows.c', + 'src/core/lib/iomgr/tcp_client_posix.c', + 'src/core/lib/iomgr/tcp_client_windows.c', + 'src/core/lib/iomgr/tcp_posix.c', + 'src/core/lib/iomgr/tcp_server_posix.c', + 'src/core/lib/iomgr/tcp_server_windows.c', + 'src/core/lib/iomgr/tcp_windows.c', + 'src/core/lib/iomgr/time_averaged_stats.c', + 'src/core/lib/iomgr/timer.c', + 'src/core/lib/iomgr/timer_heap.c', + 'src/core/lib/iomgr/udp_server.c', + 'src/core/lib/iomgr/unix_sockets_posix.c', + 'src/core/lib/iomgr/unix_sockets_posix_noop.c', + 'src/core/lib/iomgr/wakeup_fd_eventfd.c', + 'src/core/lib/iomgr/wakeup_fd_nospecial.c', + 'src/core/lib/iomgr/wakeup_fd_pipe.c', + 'src/core/lib/iomgr/wakeup_fd_posix.c', + 'src/core/lib/iomgr/workqueue_posix.c', + 'src/core/lib/iomgr/workqueue_windows.c', + 'src/core/lib/json/json.c', + 'src/core/lib/json/json_reader.c', + 'src/core/lib/json/json_string.c', + 'src/core/lib/json/json_writer.c', + 'src/core/lib/surface/alarm.c', + 'src/core/lib/surface/api_trace.c', + 'src/core/lib/surface/byte_buffer.c', + 'src/core/lib/surface/byte_buffer_reader.c', + 'src/core/lib/surface/call.c', + 'src/core/lib/surface/call_details.c', + 'src/core/lib/surface/call_log_batch.c', + 'src/core/lib/surface/channel.c', + 'src/core/lib/surface/channel_init.c', + 'src/core/lib/surface/channel_ping.c', + 'src/core/lib/surface/channel_stack_type.c', + 'src/core/lib/surface/completion_queue.c', + 'src/core/lib/surface/event_string.c', + 'src/core/lib/surface/lame_client.c', + 'src/core/lib/surface/metadata_array.c', + 'src/core/lib/surface/server.c', + 'src/core/lib/surface/validate_metadata.c', + 'src/core/lib/surface/version.c', + 'src/core/lib/transport/byte_stream.c', + 'src/core/lib/transport/connectivity_state.c', + 'src/core/lib/transport/metadata.c', + 'src/core/lib/transport/metadata_batch.c', + 'src/core/lib/transport/static_metadata.c', + 'src/core/lib/transport/transport.c', + 'src/core/lib/transport/transport_op_string.c', + 'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c', + 'src/core/ext/transport/chttp2/transport/bin_decoder.c', + 
'src/core/ext/transport/chttp2/transport/bin_encoder.c', + 'src/core/ext/transport/chttp2/transport/chttp2_plugin.c', + 'src/core/ext/transport/chttp2/transport/chttp2_transport.c', + 'src/core/ext/transport/chttp2/transport/frame_data.c', + 'src/core/ext/transport/chttp2/transport/frame_goaway.c', + 'src/core/ext/transport/chttp2/transport/frame_ping.c', + 'src/core/ext/transport/chttp2/transport/frame_rst_stream.c', + 'src/core/ext/transport/chttp2/transport/frame_settings.c', + 'src/core/ext/transport/chttp2/transport/frame_window_update.c', + 'src/core/ext/transport/chttp2/transport/hpack_encoder.c', + 'src/core/ext/transport/chttp2/transport/hpack_parser.c', + 'src/core/ext/transport/chttp2/transport/hpack_table.c', + 'src/core/ext/transport/chttp2/transport/huffsyms.c', + 'src/core/ext/transport/chttp2/transport/incoming_metadata.c', + 'src/core/ext/transport/chttp2/transport/parsing.c', + 'src/core/ext/transport/chttp2/transport/status_conversion.c', + 'src/core/ext/transport/chttp2/transport/stream_lists.c', + 'src/core/ext/transport/chttp2/transport/stream_map.c', + 'src/core/ext/transport/chttp2/transport/timeout_encoding.c', + 'src/core/ext/transport/chttp2/transport/varint.c', + 'src/core/ext/transport/chttp2/transport/writing.c', + 'src/core/ext/transport/chttp2/alpn/alpn.c', + 'src/core/lib/http/httpcli_security_connector.c', + 'src/core/lib/security/context/security_context.c', + 'src/core/lib/security/credentials/composite/composite_credentials.c', + 'src/core/lib/security/credentials/credentials.c', + 'src/core/lib/security/credentials/credentials_metadata.c', + 'src/core/lib/security/credentials/fake/fake_credentials.c', + 'src/core/lib/security/credentials/google_default/credentials_posix.c', + 'src/core/lib/security/credentials/google_default/credentials_windows.c', + 'src/core/lib/security/credentials/google_default/google_default_credentials.c', + 'src/core/lib/security/credentials/iam/iam_credentials.c', + 'src/core/lib/security/credentials/jwt/json_token.c', + 'src/core/lib/security/credentials/jwt/jwt_credentials.c', + 'src/core/lib/security/credentials/jwt/jwt_verifier.c', + 'src/core/lib/security/credentials/oauth2/oauth2_credentials.c', + 'src/core/lib/security/credentials/plugin/plugin_credentials.c', + 'src/core/lib/security/credentials/ssl/ssl_credentials.c', + 'src/core/lib/security/transport/client_auth_filter.c', + 'src/core/lib/security/transport/handshake.c', + 'src/core/lib/security/transport/secure_endpoint.c', + 'src/core/lib/security/transport/security_connector.c', + 'src/core/lib/security/transport/server_auth_filter.c', + 'src/core/lib/security/transport/tsi_error.c', + 'src/core/lib/security/util/b64.c', + 'src/core/lib/security/util/json_util.c', + 'src/core/lib/surface/init_secure.c', + 'src/core/lib/tsi/fake_transport_security.c', + 'src/core/lib/tsi/ssl_transport_security.c', + 'src/core/lib/tsi/transport_security.c', + 'src/core/ext/transport/chttp2/client/secure/secure_channel_create.c', + 'src/core/ext/client_config/channel_connectivity.c', + 'src/core/ext/client_config/client_channel.c', + 'src/core/ext/client_config/client_channel_factory.c', + 'src/core/ext/client_config/client_config.c', + 'src/core/ext/client_config/client_config_plugin.c', + 'src/core/ext/client_config/connector.c', + 'src/core/ext/client_config/default_initial_connect_string.c', + 'src/core/ext/client_config/initial_connect_string.c', + 'src/core/ext/client_config/lb_policy.c', + 'src/core/ext/client_config/lb_policy_factory.c', + 
'src/core/ext/client_config/lb_policy_registry.c', + 'src/core/ext/client_config/parse_address.c', + 'src/core/ext/client_config/resolver.c', + 'src/core/ext/client_config/resolver_factory.c', + 'src/core/ext/client_config/resolver_registry.c', + 'src/core/ext/client_config/subchannel.c', + 'src/core/ext/client_config/subchannel_call_holder.c', + 'src/core/ext/client_config/subchannel_index.c', + 'src/core/ext/client_config/uri_parser.c', + 'src/core/ext/transport/chttp2/server/insecure/server_chttp2.c', + 'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c', + 'src/core/ext/transport/chttp2/client/insecure/channel_create.c', + 'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c', + 'src/core/ext/lb_policy/grpclb/load_balancer_api.c', + 'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c', + 'third_party/nanopb/pb_common.c', + 'third_party/nanopb/pb_decode.c', + 'third_party/nanopb/pb_encode.c', + 'src/core/ext/lb_policy/pick_first/pick_first.c', + 'src/core/ext/lb_policy/round_robin/round_robin.c', + 'src/core/ext/resolver/dns/native/dns_resolver.c', + 'src/core/ext/resolver/sockaddr/sockaddr_resolver.c', + 'src/core/ext/load_reporting/load_reporting.c', + 'src/core/ext/load_reporting/load_reporting_filter.c', + 'src/core/ext/census/context.c', + 'src/core/ext/census/gen/census.pb.c', + 'src/core/ext/census/grpc_context.c', + 'src/core/ext/census/grpc_filter.c', + 'src/core/ext/census/grpc_plugin.c', + 'src/core/ext/census/initialize.c', + 'src/core/ext/census/mlog.c', + 'src/core/ext/census/operation.c', + 'src/core/ext/census/placeholders.c', + 'src/core/ext/census/tracing.c', + 'src/core/plugin_registry/grpc_plugin_registry.c' + + ss.private_header_files = 'src/core/lib/profiling/timers.h', + 'src/core/lib/support/backoff.h', + 'src/core/lib/support/block_annotate.h', + 'src/core/lib/support/env.h', + 'src/core/lib/support/murmur_hash.h', + 'src/core/lib/support/stack_lockfree.h', + 'src/core/lib/support/string.h', + 'src/core/lib/support/string_windows.h', + 'src/core/lib/support/thd_internal.h', + 'src/core/lib/support/time_precise.h', + 'src/core/lib/support/tmpfile.h', + 'src/core/lib/channel/channel_args.h', + 'src/core/lib/channel/channel_stack.h', + 'src/core/lib/channel/channel_stack_builder.h', + 'src/core/lib/channel/compress_filter.h', + 'src/core/lib/channel/connected_channel.h', + 'src/core/lib/channel/context.h', + 'src/core/lib/channel/http_client_filter.h', + 'src/core/lib/channel/http_server_filter.h', + 'src/core/lib/compression/algorithm_metadata.h', + 'src/core/lib/compression/message_compress.h', + 'src/core/lib/debug/trace.h', + 'src/core/lib/http/format_request.h', + 'src/core/lib/http/httpcli.h', + 'src/core/lib/http/parser.h', + 'src/core/lib/iomgr/closure.h', + 'src/core/lib/iomgr/endpoint.h', + 'src/core/lib/iomgr/endpoint_pair.h', + 'src/core/lib/iomgr/error.h', + 'src/core/lib/iomgr/ev_epoll_linux.h', + 'src/core/lib/iomgr/ev_poll_and_epoll_posix.h', + 'src/core/lib/iomgr/ev_poll_posix.h', + 'src/core/lib/iomgr/ev_posix.h', + 'src/core/lib/iomgr/exec_ctx.h', + 'src/core/lib/iomgr/executor.h', + 'src/core/lib/iomgr/iocp_windows.h', + 'src/core/lib/iomgr/iomgr.h', + 'src/core/lib/iomgr/iomgr_internal.h', + 'src/core/lib/iomgr/iomgr_posix.h', + 'src/core/lib/iomgr/load_file.h', + 'src/core/lib/iomgr/network_status_tracker.h', + 'src/core/lib/iomgr/polling_entity.h', + 'src/core/lib/iomgr/pollset.h', + 'src/core/lib/iomgr/pollset_set.h', + 'src/core/lib/iomgr/pollset_set_windows.h', + 
'src/core/lib/iomgr/pollset_windows.h', + 'src/core/lib/iomgr/resolve_address.h', + 'src/core/lib/iomgr/sockaddr.h', + 'src/core/lib/iomgr/sockaddr_posix.h', + 'src/core/lib/iomgr/sockaddr_utils.h', + 'src/core/lib/iomgr/sockaddr_windows.h', + 'src/core/lib/iomgr/socket_utils_posix.h', + 'src/core/lib/iomgr/socket_windows.h', + 'src/core/lib/iomgr/tcp_client.h', + 'src/core/lib/iomgr/tcp_posix.h', + 'src/core/lib/iomgr/tcp_server.h', + 'src/core/lib/iomgr/tcp_windows.h', + 'src/core/lib/iomgr/time_averaged_stats.h', + 'src/core/lib/iomgr/timer.h', + 'src/core/lib/iomgr/timer_heap.h', + 'src/core/lib/iomgr/udp_server.h', + 'src/core/lib/iomgr/unix_sockets_posix.h', + 'src/core/lib/iomgr/wakeup_fd_pipe.h', + 'src/core/lib/iomgr/wakeup_fd_posix.h', + 'src/core/lib/iomgr/workqueue.h', + 'src/core/lib/iomgr/workqueue_posix.h', + 'src/core/lib/iomgr/workqueue_windows.h', + 'src/core/lib/json/json.h', + 'src/core/lib/json/json_common.h', + 'src/core/lib/json/json_reader.h', + 'src/core/lib/json/json_writer.h', + 'src/core/lib/surface/api_trace.h', + 'src/core/lib/surface/call.h', + 'src/core/lib/surface/call_test_only.h', + 'src/core/lib/surface/channel.h', + 'src/core/lib/surface/channel_init.h', + 'src/core/lib/surface/channel_stack_type.h', + 'src/core/lib/surface/completion_queue.h', + 'src/core/lib/surface/event_string.h', + 'src/core/lib/surface/init.h', + 'src/core/lib/surface/lame_client.h', + 'src/core/lib/surface/server.h', + 'src/core/lib/transport/byte_stream.h', + 'src/core/lib/transport/connectivity_state.h', + 'src/core/lib/transport/metadata.h', + 'src/core/lib/transport/metadata_batch.h', + 'src/core/lib/transport/static_metadata.h', + 'src/core/lib/transport/transport.h', + 'src/core/lib/transport/transport_impl.h', + 'src/core/ext/transport/chttp2/transport/bin_decoder.h', + 'src/core/ext/transport/chttp2/transport/bin_encoder.h', + 'src/core/ext/transport/chttp2/transport/chttp2_transport.h', + 'src/core/ext/transport/chttp2/transport/frame.h', + 'src/core/ext/transport/chttp2/transport/frame_data.h', + 'src/core/ext/transport/chttp2/transport/frame_goaway.h', + 'src/core/ext/transport/chttp2/transport/frame_ping.h', + 'src/core/ext/transport/chttp2/transport/frame_rst_stream.h', + 'src/core/ext/transport/chttp2/transport/frame_settings.h', + 'src/core/ext/transport/chttp2/transport/frame_window_update.h', + 'src/core/ext/transport/chttp2/transport/hpack_encoder.h', + 'src/core/ext/transport/chttp2/transport/hpack_parser.h', + 'src/core/ext/transport/chttp2/transport/hpack_table.h', + 'src/core/ext/transport/chttp2/transport/http2_errors.h', + 'src/core/ext/transport/chttp2/transport/huffsyms.h', + 'src/core/ext/transport/chttp2/transport/incoming_metadata.h', + 'src/core/ext/transport/chttp2/transport/internal.h', + 'src/core/ext/transport/chttp2/transport/status_conversion.h', + 'src/core/ext/transport/chttp2/transport/stream_map.h', + 'src/core/ext/transport/chttp2/transport/timeout_encoding.h', + 'src/core/ext/transport/chttp2/transport/varint.h', + 'src/core/ext/transport/chttp2/alpn/alpn.h', + 'src/core/lib/security/context/security_context.h', + 'src/core/lib/security/credentials/composite/composite_credentials.h', + 'src/core/lib/security/credentials/credentials.h', + 'src/core/lib/security/credentials/fake/fake_credentials.h', + 'src/core/lib/security/credentials/google_default/google_default_credentials.h', + 'src/core/lib/security/credentials/iam/iam_credentials.h', + 'src/core/lib/security/credentials/jwt/json_token.h', + 
'src/core/lib/security/credentials/jwt/jwt_credentials.h', + 'src/core/lib/security/credentials/jwt/jwt_verifier.h', + 'src/core/lib/security/credentials/oauth2/oauth2_credentials.h', + 'src/core/lib/security/credentials/plugin/plugin_credentials.h', + 'src/core/lib/security/credentials/ssl/ssl_credentials.h', + 'src/core/lib/security/transport/auth_filters.h', + 'src/core/lib/security/transport/handshake.h', + 'src/core/lib/security/transport/secure_endpoint.h', + 'src/core/lib/security/transport/security_connector.h', + 'src/core/lib/security/transport/tsi_error.h', + 'src/core/lib/security/util/b64.h', + 'src/core/lib/security/util/json_util.h', + 'src/core/lib/tsi/fake_transport_security.h', + 'src/core/lib/tsi/ssl_transport_security.h', + 'src/core/lib/tsi/ssl_types.h', + 'src/core/lib/tsi/transport_security.h', + 'src/core/lib/tsi/transport_security_interface.h', + 'src/core/ext/client_config/client_channel.h', + 'src/core/ext/client_config/client_channel_factory.h', + 'src/core/ext/client_config/client_config.h', + 'src/core/ext/client_config/connector.h', + 'src/core/ext/client_config/initial_connect_string.h', + 'src/core/ext/client_config/lb_policy.h', + 'src/core/ext/client_config/lb_policy_factory.h', + 'src/core/ext/client_config/lb_policy_registry.h', + 'src/core/ext/client_config/parse_address.h', + 'src/core/ext/client_config/resolver.h', + 'src/core/ext/client_config/resolver_factory.h', + 'src/core/ext/client_config/resolver_registry.h', + 'src/core/ext/client_config/subchannel.h', + 'src/core/ext/client_config/subchannel_call_holder.h', + 'src/core/ext/client_config/subchannel_index.h', + 'src/core/ext/client_config/uri_parser.h', + 'src/core/ext/lb_policy/grpclb/load_balancer_api.h', + 'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h', + 'third_party/nanopb/pb.h', + 'third_party/nanopb/pb_common.h', + 'third_party/nanopb/pb_decode.h', + 'third_party/nanopb/pb_encode.h', + 'src/core/ext/load_reporting/load_reporting.h', + 'src/core/ext/load_reporting/load_reporting_filter.h', + 'src/core/ext/census/aggregation.h', + 'src/core/ext/census/census_interface.h', + 'src/core/ext/census/census_rpc_stats.h', + 'src/core/ext/census/gen/census.pb.h', + 'src/core/ext/census/grpc_filter.h', + 'src/core/ext/census/mlog.h', + 'src/core/ext/census/rpc_metric_id.h' + end +end diff --git a/gRPC-ProtoRPC.podspec b/gRPC-ProtoRPC.podspec new file mode 100644 index 0000000000..9cc33c7dbd --- /dev/null +++ b/gRPC-ProtoRPC.podspec @@ -0,0 +1,69 @@ +# GRPC CocoaPods podspec +# This file has been automatically generated from a template file. +# Please look at the templates directory instead. +# This file can be regenerated from the template by running +# tools/buildgen/generate_projects.sh + +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Pod::Spec.new do |s| + s.name = 'gRPC-ProtoRPC' + version = '0.14.0' + s.version = version + s.summary = 'RPC library for Protocol Buffers, based on gRPC' + s.homepage = 'http://www.grpc.io' + s.license = 'New BSD' + s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' } + + s.source = { + :git => 'https://github.com/grpc/grpc.git', + :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}", + } + + s.ios.deployment_target = '7.1' + s.osx.deployment_target = '10.9' + + name = 'ProtoRPC' + s.module_name = name + s.header_dir = name + + src_dir = 'src/objective-c/ProtoRPC' + s.source_files = "#{src_dir}/*.{h,m}" + s.header_mappings_dir = "#{src_dir}" + + s.dependency 'gRPC', version + s.dependency 'gRPC-RxLibrary', version + s.dependency 'Protobuf', '~> 3.0.0-beta-3.1' + # This is needed by all pods that depend on Protobuf: + s.pod_target_xcconfig = { + 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1', + } +end diff --git a/gRPC-RxLibrary.podspec b/gRPC-RxLibrary.podspec new file mode 100644 index 0000000000..6263878213 --- /dev/null +++ b/gRPC-RxLibrary.podspec @@ -0,0 +1,62 @@ +# GRPC CocoaPods podspec +# This file has been automatically generated from a template file. +# Please look at the templates directory instead. +# This file can be regenerated from the template by running +# tools/buildgen/generate_projects.sh + +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Pod::Spec.new do |s| + s.name = 'gRPC-RxLibrary' + version = '0.14.0' + s.version = version + s.summary = 'Reactive Extensions library for iOS/OSX.' + s.homepage = 'http://www.grpc.io' + s.license = 'New BSD' + s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' } + + s.source = { + :git => 'https://github.com/grpc/grpc.git', + :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}", + } + + s.ios.deployment_target = '7.1' + s.osx.deployment_target = '10.9' + + name = 'RxLibrary' + s.module_name = name + s.header_dir = name + + src_dir = 'src/objective-c/RxLibrary' + s.source_files = "#{src_dir}/*.{h,m}", "#{src_dir}/**/*.{h,m}" + s.private_header_files = "#{src_dir}/private/*.h" + s.header_mappings_dir = "#{src_dir}" +end diff --git a/include/grpc++/impl/codegen/core_codegen.h b/include/grpc++/impl/codegen/core_codegen.h index b0c4c57e66..9699abfb43 100644 --- a/include/grpc++/impl/codegen/core_codegen.h +++ b/include/grpc++/impl/codegen/core_codegen.h @@ -54,8 +54,8 @@ class CoreCodegen : public CoreCodegenInterface { void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) GRPC_OVERRIDE; - void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader, - grpc_byte_buffer* buffer) GRPC_OVERRIDE; + int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader, + grpc_byte_buffer* buffer) GRPC_OVERRIDE; void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader* reader) GRPC_OVERRIDE; int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader, diff --git a/include/grpc++/impl/codegen/core_codegen_interface.h b/include/grpc++/impl/codegen/core_codegen_interface.h index 64d882ed5d..f9a8f9b980 100644 --- a/include/grpc++/impl/codegen/core_codegen_interface.h +++ b/include/grpc++/impl/codegen/core_codegen_interface.h @@ -65,8 +65,9 @@ class CoreCodegenInterface { virtual void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) = 0; - virtual void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader, - grpc_byte_buffer* buffer) = 0; + virtual int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader, + grpc_byte_buffer* buffer) + GRPC_MUST_USE_RESULT = 0; virtual void grpc_byte_buffer_reader_destroy( grpc_byte_buffer_reader* reader) = 0; virtual int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader, diff --git a/include/grpc++/impl/codegen/proto_utils.h b/include/grpc++/impl/codegen/proto_utils.h index 3bad468a74..d4599c5fff 100644 --- a/include/grpc++/impl/codegen/proto_utils.h +++ b/include/grpc++/impl/codegen/proto_utils.h @@ -111,14 +111,21 @@ class GrpcBufferReader GRPC_FINAL : public ::grpc::protobuf::io::ZeroCopyInputStream { public: explicit GrpcBufferReader(grpc_byte_buffer* buffer) - : byte_count_(0), backup_count_(0) { - g_core_codegen_interface->grpc_byte_buffer_reader_init(&reader_, buffer); + : byte_count_(0), backup_count_(0), status_() { + if (!g_core_codegen_interface->grpc_byte_buffer_reader_init(&reader_, + buffer)) { + status_ = Status(StatusCode::INTERNAL, + "Couldn't initialize byte 
buffer reader"); + } } ~GrpcBufferReader() GRPC_OVERRIDE { g_core_codegen_interface->grpc_byte_buffer_reader_destroy(&reader_); } bool Next(const void** data, int* size) GRPC_OVERRIDE { + if (!status_.ok()) { + return false; + } if (backup_count_ > 0) { *data = GPR_SLICE_START_PTR(slice_) + GPR_SLICE_LENGTH(slice_) - backup_count_; @@ -139,6 +146,8 @@ class GrpcBufferReader GRPC_FINAL return true; } + Status status() const { return status_; } + void BackUp(int count) GRPC_OVERRIDE { backup_count_ = count; } bool Skip(int count) GRPC_OVERRIDE { @@ -165,6 +174,7 @@ class GrpcBufferReader GRPC_FINAL int64_t backup_count_; grpc_byte_buffer_reader reader_; gpr_slice slice_; + Status status_; }; } // namespace internal @@ -202,6 +212,9 @@ class SerializationTraits<T, typename std::enable_if<std::is_base_of< Status result = g_core_codegen_interface->ok(); { internal::GrpcBufferReader reader(buffer); + if (!reader.status().ok()) { + return reader.status(); + } ::grpc::protobuf::io::CodedInputStream decoder(&reader); if (max_message_size > 0) { decoder.SetTotalBytesLimit(max_message_size, max_message_size); diff --git a/include/grpc++/support/byte_buffer.h b/include/grpc++/support/byte_buffer.h index f6eb09638f..20bd407109 100644 --- a/include/grpc++/support/byte_buffer.h +++ b/include/grpc++/support/byte_buffer.h @@ -64,7 +64,7 @@ class ByteBuffer GRPC_FINAL { ByteBuffer& operator=(const ByteBuffer&); /// Dump (read) the buffer contents into \a slices. - void Dump(std::vector<Slice>* slices) const; + Status Dump(std::vector<Slice>* slices) const; /// Remove all data. void Clear(); diff --git a/include/grpc/impl/codegen/byte_buffer.h b/include/grpc/impl/codegen/byte_buffer.h index 3ae8ac50ba..fe1e215979 100644 --- a/include/grpc/impl/codegen/byte_buffer.h +++ b/include/grpc/impl/codegen/byte_buffer.h @@ -1,6 +1,6 @@ /* * - * Copyright 2015, Google Inc. + * Copyright 2015-2016, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -93,9 +93,10 @@ GRPCAPI void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer); struct grpc_byte_buffer_reader; typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader; -/** Initialize \a reader to read over \a buffer */ -GRPCAPI void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, - grpc_byte_buffer *buffer); +/** Initialize \a reader to read over \a buffer. + * Returns 1 upon success, 0 otherwise. 
*/ +GRPCAPI int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, + grpc_byte_buffer *buffer); /** Cleanup and destroy \a reader */ GRPCAPI void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader); diff --git a/include/grpc/module.modulemap b/include/grpc/module.modulemap new file mode 100644 index 0000000000..ae11a78b74 --- /dev/null +++ b/include/grpc/module.modulemap @@ -0,0 +1,5 @@ +framework module grpc { + umbrella header "grpc.h" + export * + module * { export * } +} diff --git a/package.json b/package.json index 68a31d794c..0e229c9842 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "grpc", - "version": "0.16.0-dev", + "version": "1.1.0-dev", "author": "Google Inc.", "description": "gRPC Library for Node", "homepage": "http://www.grpc.io/", @@ -59,7 +59,6 @@ "files": [ "LICENSE", "src/node/README.md", - "src/node/health_check", "src/proto", "etc", "src/node/index.js", diff --git a/package.xml b/package.xml index 49a6d900f1..d7d10b3f7c 100644 --- a/package.xml +++ b/package.xml @@ -13,8 +13,8 @@ <date>2016-06-30</date> <time>16:06:07</time> <version> - <release>0.16.0</release> - <api>0.16.0</api> + <release>1.1.0</release> + <api>1.1.0</api> </version> <stability> <release>beta</release> @@ -31,6 +31,7 @@ import os import os.path +import platform import shlex import shutil import sys @@ -56,10 +57,15 @@ os.chdir(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, os.path.abspath(PYTHON_STEM)) # Break import-style to ensure we can actually find our in-repo dependencies. +import _unixccompiler_patch import commands import grpc_core_dependencies import grpc_version +# TODO(atash) make this conditional on being on a mingw32 build +_unixccompiler_patch.monkeypatch_unix_compiler() + + LICENSE = '3-clause BSD' # Environment variable to determine whether or not the Cython extension should @@ -72,6 +78,14 @@ BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False) ENABLE_CYTHON_TRACING = os.environ.get( 'GRPC_PYTHON_ENABLE_CYTHON_TRACING', False) +# There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are +# entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support. +# We use these environment variables to thus get around that without locking +# ourselves in w.r.t. the multitude of operating systems this ought to build on. +# By default we assume a GCC-like compiler. 
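The variables introduced next are populated from the GRPC_PYTHON_CFLAGS and GRPC_PYTHON_LDFLAGS environment variables, which keeps extra compiler and linker flags reachable even on toolchains where distutils discards CC/CFLAGS/LDFLAGS. A minimal sketch of the same pass-through pattern, assuming a GCC-like compiler; the extension name, source file, and flag values are hypothetical placeholders, not values taken from this change:

    # Sketch: feed compiler/linker flags to a distutils Extension via environment
    # variables. The module name, source path, and example flags are placeholders.
    import os
    import shlex
    from distutils.core import setup, Extension

    extra_compile_args = shlex.split(os.environ.get('GRPC_PYTHON_CFLAGS', ''))
    extra_link_args = shlex.split(os.environ.get('GRPC_PYTHON_LDFLAGS', ''))

    example_ext = Extension(
        'example._ext',                  # hypothetical extension module
        sources=['example/_ext.c'],      # hypothetical C source
        extra_compile_args=extra_compile_args,
        extra_link_args=extra_link_args,
    )

    setup(name='example', version='0.0.1', ext_modules=[example_ext])

Invoked as, for example, GRPC_PYTHON_CFLAGS='-std=gnu99 -fvisibility=hidden' python setup.py build_ext, the split flags are handed directly to each compile and link invocation, independent of whatever the platform's distutils does with CFLAGS and LDFLAGS.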
+EXTRA_COMPILE_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_CFLAGS', '')) +EXTRA_LINK_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_LDFLAGS', '')) + CYTHON_EXTENSION_PACKAGE_NAMES = () CYTHON_EXTENSION_MODULE_NAMES = ('grpc._cython.cygrpc',) @@ -81,9 +95,7 @@ CYTHON_HELPER_C_FILES = ( os.path.join(PYTHON_STEM, 'grpc/_cython/imports.generated.c'), ) -CORE_C_FILES = () -if not "win32" in sys.platform: - CORE_C_FILES += tuple(grpc_core_dependencies.CORE_SOURCE_FILES) +CORE_C_FILES = tuple(grpc_core_dependencies.CORE_SOURCE_FILES) EXTENSION_INCLUDE_DIRECTORIES = ( (PYTHON_STEM,) + CORE_INCLUDE + BORINGSSL_INCLUDE + ZLIB_INCLUDE) @@ -93,12 +105,17 @@ if "linux" in sys.platform: EXTENSION_LIBRARIES += ('rt',) if not "win32" in sys.platform: EXTENSION_LIBRARIES += ('m',) +if "win32" in sys.platform: + EXTENSION_LIBRARIES += ('ws2_32',) DEFINE_MACROS = (('OPENSSL_NO_ASM', 1), ('_WIN32_WINNT', 0x600), ('GPR_BACKWARDS_COMPATIBILITY_MODE', 1),) +if "win32" in sys.platform: + DEFINE_MACROS += (('OPENSSL_WINDOWS', 1), ('WIN32_LEAN_AND_MEAN', 1),) + if '64bit' in platform.architecture()[0]: + DEFINE_MACROS += (('MS_WIN64', 1),) -LDFLAGS = shlex.split(os.environ.get('GRPC_PYTHON_LDFLAGS', '')) -CFLAGS = shlex.split(os.environ.get('GRPC_PYTHON_CFLAGS', '')) - +LDFLAGS = tuple(EXTRA_LINK_ARGS) +CFLAGS = tuple(EXTRA_COMPILE_ARGS) if "linux" in sys.platform: LDFLAGS += ('-Wl,-wrap,memcpy',) if "linux" in sys.platform or "darwin" in sys.platform: diff --git a/src/compiler/objective_c_plugin.cc b/src/compiler/objective_c_plugin.cc index 3ccfd5b037..be64776402 100644 --- a/src/compiler/objective_c_plugin.cc +++ b/src/compiler/objective_c_plugin.cc @@ -39,6 +39,11 @@ #include "src/compiler/objective_c_generator.h" #include "src/compiler/objective_c_generator_helpers.h" +#include <google/protobuf/compiler/objectivec/objectivec_helpers.h> + +using ::google::protobuf::compiler::objectivec::ProtobufLibraryFrameworkName; +using ::google::protobuf::compiler::objectivec::IsProtobufLibraryBundledProtoFile; + class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { public: ObjectiveCGrpcGenerator() {} @@ -72,7 +77,21 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { for (int i = 0; i < file->dependency_count(); i++) { ::grpc::string header = grpc_objective_c_generator::MessageHeaderName( file->dependency(i)); - proto_imports += ::grpc::string("#import \"") + header + "\"\n"; + const grpc::protobuf::FileDescriptor *dependency = file->dependency(i); + if (IsProtobufLibraryBundledProtoFile(dependency)) { + ::grpc::string base_name = header; + grpc_generator::StripPrefix(&base_name, "google/protobuf/"); + // create the import code snippet + proto_imports += + "#if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS\n" + " #import <" + ::grpc::string(ProtobufLibraryFrameworkName) + + "/" + base_name + ">\n" + "#else\n" + " #import \"" + header + "\"\n" + "#endif\n"; + } else { + proto_imports += ::grpc::string("#import \"") + header + "\"\n"; + } } ::grpc::string declarations; @@ -85,7 +104,7 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { static const ::grpc::string kNonNullEnd = "\nNS_ASSUME_NONNULL_END\n"; Write(context, file_name + ".pbrpc.h", - imports + '\n' + proto_imports + '\n' + kNonNullBegin + + imports + '\n' + proto_imports + '\n' + kNonNullBegin + declarations + kNonNullEnd); } diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 
38e782b9b4..5aae753c07 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -513,6 +513,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, &s->global.received_trailing_metadata); grpc_chttp2_data_parser_init(&s->parsing.data_parser); gpr_slice_buffer_init(&s->writing.flow_controlled_buffer); + s->global.deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); REF_TRANSPORT(t, "stream"); @@ -988,6 +989,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, const size_t metadata_peer_limit = transport_global->settings[GRPC_PEER_SETTINGS] [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE]; + if (transport_global->is_client) { + stream_global->deadline = + gpr_time_min(stream_global->deadline, + stream_global->send_initial_metadata->deadline); + } if (metadata_size > metadata_peer_limit) { cancel_from_api( exec_ctx, transport_global, stream_global, @@ -1366,7 +1372,7 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, GRPC_ERROR_UNREF(error); } -static void status_codes_from_error(grpc_error *error, +static void status_codes_from_error(grpc_error *error, gpr_timespec deadline, grpc_chttp2_error_code *http2_error, grpc_status_code *grpc_status) { intptr_t ip_http; @@ -1386,8 +1392,8 @@ static void status_codes_from_error(grpc_error *error, if (have_grpc) { *grpc_status = (grpc_status_code)ip_grpc; } else if (have_http) { - *grpc_status = - grpc_chttp2_http2_error_to_grpc_status((grpc_chttp2_error_code)ip_http); + *grpc_status = grpc_chttp2_http2_error_to_grpc_status( + (grpc_chttp2_error_code)ip_http, deadline); } else { *grpc_status = GRPC_STATUS_INTERNAL; } @@ -1400,7 +1406,8 @@ static void cancel_from_api(grpc_exec_ctx *exec_ctx, if (!stream_global->read_closed || !stream_global->write_closed) { grpc_status_code grpc_status; grpc_chttp2_error_code http_error; - status_codes_from_error(due_to_error, &http_error, &grpc_status); + status_codes_from_error(due_to_error, stream_global->deadline, &http_error, + &grpc_status); if (stream_global->id != 0) { gpr_slice_buffer_add( @@ -1536,7 +1543,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, uint32_t len = 0; grpc_status_code grpc_status; grpc_chttp2_error_code http_error; - status_codes_from_error(error, &http_error, &grpc_status); + status_codes_from_error(error, stream_global->deadline, &http_error, + &grpc_status); GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100); diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h index b5180c6fc8..8d79e93ceb 100644 --- a/src/core/ext/transport/chttp2/transport/internal.h +++ b/src/core/ext/transport/chttp2/transport/internal.h @@ -447,6 +447,8 @@ typedef struct { grpc_chttp2_incoming_metadata_buffer received_trailing_metadata; grpc_chttp2_incoming_frame_queue incoming_frames; + + gpr_timespec deadline; } grpc_chttp2_stream_global; typedef struct { diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c index 991d7729af..84eb5752f1 100644 --- a/src/core/ext/transport/chttp2/transport/parsing.c +++ b/src/core/ext/transport/chttp2/transport/parsing.c @@ -236,9 +236,10 @@ void grpc_chttp2_publish_reads( GRPC_ERROR_INT_HTTP2_ERROR, &reason); if (has_reason && reason != GRPC_CHTTP2_NO_ERROR) { grpc_status_code status_code = - has_reason ? 
grpc_chttp2_http2_error_to_grpc_status( - (grpc_chttp2_error_code)reason) - : GRPC_STATUS_INTERNAL; + has_reason + ? grpc_chttp2_http2_error_to_grpc_status( + (grpc_chttp2_error_code)reason, stream_global->deadline) + : GRPC_STATUS_INTERNAL; const char *status_details = grpc_error_string(stream_parsing->forced_close_error); gpr_slice slice_details = gpr_slice_from_copied_string(status_details); diff --git a/src/core/ext/transport/chttp2/transport/status_conversion.c b/src/core/ext/transport/chttp2/transport/status_conversion.c index c42fb9b3a1..5dce2f2d0c 100644 --- a/src/core/ext/transport/chttp2/transport/status_conversion.c +++ b/src/core/ext/transport/chttp2/transport/status_conversion.c @@ -39,6 +39,8 @@ int grpc_chttp2_grpc_status_to_http2_error(grpc_status_code status) { return GRPC_CHTTP2_NO_ERROR; case GRPC_STATUS_CANCELLED: return GRPC_CHTTP2_CANCEL; + case GRPC_STATUS_DEADLINE_EXCEEDED: + return GRPC_CHTTP2_CANCEL; case GRPC_STATUS_RESOURCE_EXHAUSTED: return GRPC_CHTTP2_ENHANCE_YOUR_CALM; case GRPC_STATUS_PERMISSION_DENIED: @@ -51,13 +53,17 @@ int grpc_chttp2_grpc_status_to_http2_error(grpc_status_code status) { } grpc_status_code grpc_chttp2_http2_error_to_grpc_status( - grpc_chttp2_error_code error) { + grpc_chttp2_error_code error, gpr_timespec deadline) { switch (error) { case GRPC_CHTTP2_NO_ERROR: /* should never be received */ return GRPC_STATUS_INTERNAL; case GRPC_CHTTP2_CANCEL: - return GRPC_STATUS_CANCELLED; + /* http2 cancel translates to STATUS_CANCELLED iff deadline hasn't been + * exceeded */ + return gpr_time_cmp(gpr_now(deadline.clock_type), deadline) >= 0 + ? GRPC_STATUS_DEADLINE_EXCEEDED + : GRPC_STATUS_CANCELLED; case GRPC_CHTTP2_ENHANCE_YOUR_CALM: return GRPC_STATUS_RESOURCE_EXHAUSTED; case GRPC_CHTTP2_INADEQUATE_SECURITY: diff --git a/src/core/ext/transport/chttp2/transport/status_conversion.h b/src/core/ext/transport/chttp2/transport/status_conversion.h index e7285e6fd5..953bc9f1e1 100644 --- a/src/core/ext/transport/chttp2/transport/status_conversion.h +++ b/src/core/ext/transport/chttp2/transport/status_conversion.h @@ -41,7 +41,7 @@ grpc_chttp2_error_code grpc_chttp2_grpc_status_to_http2_error( grpc_status_code status); grpc_status_code grpc_chttp2_http2_error_to_grpc_status( - grpc_chttp2_error_code error); + grpc_chttp2_error_code error, gpr_timespec deadline); /* Conversion of HTTP status codes (:status) to grpc status codes */ grpc_status_code grpc_chttp2_http2_status_to_grpc_status(int status); diff --git a/src/core/lib/http/parser.c.orig b/src/core/lib/http/parser.c.orig deleted file mode 100644 index 74d90fd8bf..0000000000 --- a/src/core/lib/http/parser.c.orig +++ /dev/null @@ -1,357 +0,0 @@ -/* - * - * Copyright 2015, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include "src/core/lib/http/parser.h" - -#include <string.h> - -#include <grpc/support/alloc.h> -#include <grpc/support/log.h> -#include <grpc/support/useful.h> - -int grpc_http1_trace = 0; - -static char *buf2str(void *buffer, size_t length) { - char *out = gpr_malloc(length + 1); - memcpy(out, buffer, length); - out[length] = 0; - return out; -} - -static grpc_error *handle_response_line(grpc_http_parser *parser) { - uint8_t *beg = parser->cur_line; - uint8_t *cur = beg; - uint8_t *end = beg + parser->cur_line_length; - - if (cur == end || *cur++ != 'H') return GRPC_ERROR_CREATE("Expected 'H'"); - if (cur == end || *cur++ != 'T') return GRPC_ERROR_CREATE("Expected 'T'"); - if (cur == end || *cur++ != 'T') return GRPC_ERROR_CREATE("Expected 'T'"); - if (cur == end || *cur++ != 'P') return GRPC_ERROR_CREATE("Expected 'P'"); - if (cur == end || *cur++ != '/') return GRPC_ERROR_CREATE("Expected '/'"); - if (cur == end || *cur++ != '1') return GRPC_ERROR_CREATE("Expected '1'"); - if (cur == end || *cur++ != '.') return GRPC_ERROR_CREATE("Expected '.'"); - if (cur == end || *cur < '0' || *cur++ > '1') { - return GRPC_ERROR_CREATE("Expected HTTP/1.0 or HTTP/1.1"); - } - if (cur == end || *cur++ != ' ') return GRPC_ERROR_CREATE("Expected ' '"); - if (cur == end || *cur < '1' || *cur++ > '9') - return GRPC_ERROR_CREATE("Expected status code"); - if (cur == end || *cur < '0' || *cur++ > '9') - return GRPC_ERROR_CREATE("Expected status code"); - if (cur == end || *cur < '0' || *cur++ > '9') - return GRPC_ERROR_CREATE("Expected status code"); - parser->http.response->status = - (cur[-3] - '0') * 100 + (cur[-2] - '0') * 10 + (cur[-1] - '0'); - if (cur == end || *cur++ != ' ') return GRPC_ERROR_CREATE("Expected ' '"); - - /* we don't really care about the status code message */ - - return GRPC_ERROR_NONE; -} - -static grpc_error *handle_request_line(grpc_http_parser *parser) { - uint8_t *beg = parser->cur_line; - uint8_t *cur = beg; - uint8_t *end = beg + parser->cur_line_length; - uint8_t vers_major = 0; - uint8_t vers_minor = 0; - - while (cur != end && *cur++ != ' ') - ; - if (cur == end) return GRPC_ERROR_CREATE("No method on HTTP request line"); - parser->http.request->method = buf2str(beg, (size_t)(cur - beg - 1)); - - beg = cur; - while (cur != end && *cur++ != ' ') - ; - if (cur == end) return GRPC_ERROR_CREATE("No path on HTTP request line"); - parser->http.request->path = buf2str(beg, (size_t)(cur - beg - 1)); - - if (cur == end || *cur++ != 'H') return GRPC_ERROR_CREATE("Expected 'H'"); - if (cur == end || *cur++ != 'T') return GRPC_ERROR_CREATE("Expected 'T'"); - if (cur == end || *cur++ != 'T') return GRPC_ERROR_CREATE("Expected 'T'"); - if (cur == end || *cur++ != 'P') return 
GRPC_ERROR_CREATE("Expected 'P'"); - if (cur == end || *cur++ != '/') return GRPC_ERROR_CREATE("Expected '/'"); - vers_major = (uint8_t)(*cur++ - '1' + 1); - ++cur; - if (cur == end) - return GRPC_ERROR_CREATE("End of line in HTTP version string"); - vers_minor = (uint8_t)(*cur++ - '1' + 1); - - if (vers_major == 1) { - if (vers_minor == 0) { - parser->http.request->version = GRPC_HTTP_HTTP10; - } else if (vers_minor == 1) { - parser->http.request->version = GRPC_HTTP_HTTP11; - } else { - return GRPC_ERROR_CREATE( - "Expected one of HTTP/1.0, HTTP/1.1, or HTTP/2.0"); - } - } else if (vers_major == 2) { - if (vers_minor == 0) { - parser->http.request->version = GRPC_HTTP_HTTP20; - } else { - return GRPC_ERROR_CREATE( - "Expected one of HTTP/1.0, HTTP/1.1, or HTTP/2.0"); - } - } else { - return GRPC_ERROR_CREATE("Expected one of HTTP/1.0, HTTP/1.1, or HTTP/2.0"); - } - - return GRPC_ERROR_NONE; -} - -static grpc_error *handle_first_line(grpc_http_parser *parser) { - switch (parser->type) { - case GRPC_HTTP_REQUEST: - return handle_request_line(parser); - case GRPC_HTTP_RESPONSE: - return handle_response_line(parser); - } - GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here")); -} - -static grpc_error *add_header(grpc_http_parser *parser) { - uint8_t *beg = parser->cur_line; - uint8_t *cur = beg; - uint8_t *end = beg + parser->cur_line_length; - size_t *hdr_count = NULL; - grpc_http_header **hdrs = NULL; - grpc_http_header hdr = {NULL, NULL}; - grpc_error *error = GRPC_ERROR_NONE; - - GPR_ASSERT(cur != end); - - if (*cur == ' ' || *cur == '\t') { - error = GRPC_ERROR_CREATE("Continued header lines not supported yet"); - goto done; - } - - while (cur != end && *cur != ':') { - cur++; - } - if (cur == end) { -<<<<<<< HEAD - error = GRPC_ERROR_CREATE("Didn't find ':' in header string"); - goto done; -======= - if (grpc_http1_trace) { - gpr_log(GPR_ERROR, "Didn't find ':' in header string"); - } - goto error; ->>>>>>> a709afe241d8b264a1c326315f757b4a8d330207 - } - GPR_ASSERT(cur >= beg); - hdr.key = buf2str(beg, (size_t)(cur - beg)); - cur++; /* skip : */ - - while (cur != end && (*cur == ' ' || *cur == '\t')) { - cur++; - } - GPR_ASSERT((size_t)(end - cur) >= parser->cur_line_end_length); - hdr.value = buf2str(cur, (size_t)(end - cur) - parser->cur_line_end_length); - - switch (parser->type) { - case GRPC_HTTP_RESPONSE: - hdr_count = &parser->http.response->hdr_count; - hdrs = &parser->http.response->hdrs; - break; - case GRPC_HTTP_REQUEST: - hdr_count = &parser->http.request->hdr_count; - hdrs = &parser->http.request->hdrs; - break; - } - - if (*hdr_count == parser->hdr_capacity) { - parser->hdr_capacity = - GPR_MAX(parser->hdr_capacity + 1, parser->hdr_capacity * 3 / 2); - *hdrs = gpr_realloc(*hdrs, parser->hdr_capacity * sizeof(**hdrs)); - } - (*hdrs)[(*hdr_count)++] = hdr; - -done: - if (error != GRPC_ERROR_NONE) { - gpr_free(hdr.key); - gpr_free(hdr.value); - } - return error; -} - -static grpc_error *finish_line(grpc_http_parser *parser) { - grpc_error *err; - switch (parser->state) { - case GRPC_HTTP_FIRST_LINE: - err = handle_first_line(parser); - if (err != GRPC_ERROR_NONE) return err; - parser->state = GRPC_HTTP_HEADERS; - break; - case GRPC_HTTP_HEADERS: - if (parser->cur_line_length == parser->cur_line_end_length) { - parser->state = GRPC_HTTP_BODY; - break; - } - err = add_header(parser); - if (err != GRPC_ERROR_NONE) { - return err; - } - break; - case GRPC_HTTP_BODY: - GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here")); - } - - 
parser->cur_line_length = 0; - return GRPC_ERROR_NONE; -} - -static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) { - size_t *body_length = NULL; - char **body = NULL; - - if (parser->type == GRPC_HTTP_RESPONSE) { - body_length = &parser->http.response->body_length; - body = &parser->http.response->body; - } else if (parser->type == GRPC_HTTP_REQUEST) { - body_length = &parser->http.request->body_length; - body = &parser->http.request->body; - } else { - GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here")); - } - - if (*body_length == parser->body_capacity) { - parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2); - *body = gpr_realloc((void *)*body, parser->body_capacity); - } - (*body)[*body_length] = (char)byte; - (*body_length)++; - - return GRPC_ERROR_NONE; -} - -static bool check_line(grpc_http_parser *parser) { - if (parser->cur_line_length >= 2 && - parser->cur_line[parser->cur_line_length - 2] == '\r' && - parser->cur_line[parser->cur_line_length - 1] == '\n') { - return true; - } - - // HTTP request with \n\r line terminators. - else if (parser->cur_line_length >= 2 && - parser->cur_line[parser->cur_line_length - 2] == '\n' && - parser->cur_line[parser->cur_line_length - 1] == '\r') { - return true; - } - - // HTTP request with only \n line terminators. - else if (parser->cur_line_length >= 1 && - parser->cur_line[parser->cur_line_length - 1] == '\n') { - parser->cur_line_end_length = 1; - return true; - } - - return false; -} - -static grpc_error *addbyte(grpc_http_parser *parser, uint8_t byte) { - switch (parser->state) { - case GRPC_HTTP_FIRST_LINE: - case GRPC_HTTP_HEADERS: - if (parser->cur_line_length >= GRPC_HTTP_PARSER_MAX_HEADER_LENGTH) { - if (grpc_http1_trace) - gpr_log(GPR_ERROR, "HTTP client max line length (%d) exceeded", - GRPC_HTTP_PARSER_MAX_HEADER_LENGTH); - return 0; - } - parser->cur_line[parser->cur_line_length] = byte; - parser->cur_line_length++; - if (check_line(parser)) { - return finish_line(parser); - } else { - return GRPC_ERROR_NONE; - } - GPR_UNREACHABLE_CODE(return 0); - case GRPC_HTTP_BODY: - return addbyte_body(parser, byte); - } - GPR_UNREACHABLE_CODE(return 0); -} - -void grpc_http_parser_init(grpc_http_parser *parser, grpc_http_type type, - void *request_or_response) { - memset(parser, 0, sizeof(*parser)); - parser->state = GRPC_HTTP_FIRST_LINE; - parser->type = type; - parser->http.request_or_response = request_or_response; - parser->cur_line_end_length = 2; -} - -void grpc_http_parser_destroy(grpc_http_parser *parser) {} - -void grpc_http_request_destroy(grpc_http_request *request) { - size_t i; - gpr_free(request->body); - for (i = 0; i < request->hdr_count; i++) { - gpr_free(request->hdrs[i].key); - gpr_free(request->hdrs[i].value); - } - gpr_free(request->hdrs); - gpr_free(request->method); - gpr_free(request->path); -} - -void grpc_http_response_destroy(grpc_http_response *response) { - size_t i; - gpr_free(response->body); - for (i = 0; i < response->hdr_count; i++) { - gpr_free(response->hdrs[i].key); - gpr_free(response->hdrs[i].value); - } - gpr_free(response->hdrs); -} - -grpc_error *grpc_http_parser_parse(grpc_http_parser *parser, gpr_slice slice) { - size_t i; - - for (i = 0; i < GPR_SLICE_LENGTH(slice); i++) { - grpc_error *err = addbyte(parser, GPR_SLICE_START_PTR(slice)[i]); - if (err != GRPC_ERROR_NONE) return err; - } - - return GRPC_ERROR_NONE; -} - -grpc_error *grpc_http_parser_eof(grpc_http_parser *parser) { - if (parser->state != GRPC_HTTP_BODY) { - return
GRPC_ERROR_CREATE("Did not finish headers"); - } - return GRPC_ERROR_NONE; -} diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c index a1a463550a..d3803c3bd0 100644 --- a/src/core/lib/iomgr/tcp_server_posix.c +++ b/src/core/lib/iomgr/tcp_server_posix.c @@ -134,7 +134,7 @@ struct grpc_tcp_server { size_t pollset_count; /* next pollset to assign a channel to */ - size_t next_pollset_to_assign; + gpr_atm next_pollset_to_assign; }; static gpr_once check_init = GPR_ONCE_INIT; @@ -181,7 +181,7 @@ grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete, s->head = NULL; s->tail = NULL; s->nports = 0; - s->next_pollset_to_assign = 0; + gpr_atm_no_barrier_store(&s->next_pollset_to_assign, 0); *server = s; return GRPC_ERROR_NONE; } @@ -369,7 +369,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { } read_notifier_pollset = - sp->server->pollsets[(sp->server->next_pollset_to_assign++) % + sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add( + &sp->server->next_pollset_to_assign, 1) % sp->server->pollset_count]; /* loop until accept4 returns EAGAIN, and then re-arm notification */ diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c index 14ccf72dc9..ed7929aa27 100644 --- a/src/core/lib/security/transport/client_auth_filter.c +++ b/src/core/lib/security/transport/client_auth_filter.c @@ -176,7 +176,7 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx, calld->creds = grpc_composite_call_credentials_create(channel_call_creds, ctx->creds, NULL); if (calld->creds == NULL) { - bubble_up_error(exec_ctx, elem, GRPC_STATUS_INTERNAL, + bubble_up_error(exec_ctx, elem, GRPC_STATUS_UNAUTHENTICATED, "Incompatible credentials set on channel and call."); return; } @@ -205,7 +205,7 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *user_data, char *error_msg; gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.", grpc_mdstr_as_c_string(calld->host)); - bubble_up_error(exec_ctx, elem, GRPC_STATUS_INTERNAL, error_msg); + bubble_up_error(exec_ctx, elem, GRPC_STATUS_UNAUTHENTICATED, error_msg); gpr_free(error_msg); } } diff --git a/src/core/lib/surface/byte_buffer_reader.c b/src/core/lib/surface/byte_buffer_reader.c index c97079f638..310bacb2c9 100644 --- a/src/core/lib/surface/byte_buffer_reader.c +++ b/src/core/lib/surface/byte_buffer_reader.c @@ -54,8 +54,8 @@ static int is_compressed(grpc_byte_buffer *buffer) { return 1 /* GPR_TRUE */; } -void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, - grpc_byte_buffer *buffer) { +int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, + grpc_byte_buffer *buffer) { gpr_slice_buffer decompressed_slices_buffer; reader->buffer_in = buffer; switch (reader->buffer_in->type) { @@ -67,9 +67,10 @@ void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, &decompressed_slices_buffer) == 0) { gpr_log(GPR_ERROR, "Unexpected error decompressing data for algorithm with enum " - "value '%d'. 
Reading data as if it were uncompressed.", + "value '%d'.", reader->buffer_in->data.raw.compression); - reader->buffer_out = reader->buffer_in; + memset(reader, 0, sizeof(*reader)); + return 0; } else { /* all fine */ reader->buffer_out = grpc_raw_byte_buffer_create(decompressed_slices_buffer.slices, @@ -82,6 +83,7 @@ void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, reader->current.index = 0; break; } + return 1; } void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) { diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c index e5668be47f..fc9df76dc1 100644 --- a/src/core/lib/surface/call.c +++ b/src/core/lib/surface/call.c @@ -259,7 +259,8 @@ grpc_call *grpc_call_create( call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); } } - call->send_deadline = send_deadline; + call->send_deadline = + gpr_convert_clock_type(send_deadline, GPR_CLOCK_MONOTONIC); GRPC_CHANNEL_INTERNAL_REF(channel, "call"); /* initial refcount dropped by grpc_call_destroy */ grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call, diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c index 2cf6d8890a..6d2b1c4935 100644 --- a/src/core/lib/surface/channel.c +++ b/src/core/lib/surface/channel.c @@ -81,7 +81,7 @@ struct grpc_channel { CHANNEL_FROM_CHANNEL_STACK(grpc_channel_stack_from_top_element(top_elem)) /* the protobuf library will (by default) start warning at 100megs */ -#define DEFAULT_MAX_MESSAGE_LENGTH (100 * 1024 * 1024) +#define DEFAULT_MAX_MESSAGE_LENGTH (4 * 1024 * 1024) static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error); diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.c index 53f3c43854..1942075054 100644 --- a/src/core/lib/surface/version.c +++ b/src/core/lib/surface/version.c @@ -36,4 +36,4 @@ #include <grpc/grpc.h> -const char *grpc_version_string(void) { return "0.16.0-dev"; } +const char *grpc_version_string(void) { return "1.1.0-dev"; } diff --git a/src/cpp/common/core_codegen.cc b/src/cpp/common/core_codegen.cc index cc35aa69ba..3d6780bcb8 100644 --- a/src/cpp/common/core_codegen.cc +++ b/src/cpp/common/core_codegen.cc @@ -74,9 +74,9 @@ void CoreCodegen::grpc_byte_buffer_destroy(grpc_byte_buffer* bb) { ::grpc_byte_buffer_destroy(bb); } -void CoreCodegen::grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader, - grpc_byte_buffer* buffer) { - ::grpc_byte_buffer_reader_init(reader, buffer); +int CoreCodegen::grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader, + grpc_byte_buffer* buffer) { + return ::grpc_byte_buffer_reader_init(reader, buffer); } void CoreCodegen::grpc_byte_buffer_reader_destroy( diff --git a/src/cpp/util/byte_buffer.cc b/src/cpp/util/byte_buffer.cc index c0a14de418..c2cd20ee07 100644 --- a/src/cpp/util/byte_buffer.cc +++ b/src/cpp/util/byte_buffer.cc @@ -58,18 +58,22 @@ void ByteBuffer::Clear() { } } -void ByteBuffer::Dump(std::vector<Slice>* slices) const { +Status ByteBuffer::Dump(std::vector<Slice>* slices) const { slices->clear(); if (!buffer_) { - return; + return Status(StatusCode::FAILED_PRECONDITION, "Buffer not initialized"); } grpc_byte_buffer_reader reader; - grpc_byte_buffer_reader_init(&reader, buffer_); + if (!grpc_byte_buffer_reader_init(&reader, buffer_)) { + return Status(StatusCode::INTERNAL, + "Couldn't initialize byte buffer reader"); + } gpr_slice s; while (grpc_byte_buffer_reader_next(&reader, &s)) { slices->push_back(Slice(s, Slice::STEAL_REF)); } 
grpc_byte_buffer_reader_destroy(&reader); + return Status::OK; } size_t ByteBuffer::Length() const { diff --git a/src/csharp/Grpc.Auth/project.json b/src/csharp/Grpc.Auth/project.json index 72c258a91a..08429f1d46 100644 --- a/src/csharp/Grpc.Auth/project.json +++ b/src/csharp/Grpc.Auth/project.json @@ -1,5 +1,5 @@ { - "version": "0.16.0-dev", + "version": "1.1.0-dev", "title": "gRPC C# Auth", "authors": [ "Google Inc." ], "copyright": "Copyright 2015, Google Inc.", @@ -22,7 +22,7 @@ } }, "dependencies": { - "Grpc.Core": "0.16.0-dev", + "Grpc.Core": "1.1.0-dev", "Google.Apis.Auth": "1.11.1" }, "frameworks": { diff --git a/src/csharp/Grpc.Core/VersionInfo.cs b/src/csharp/Grpc.Core/VersionInfo.cs index cb20967680..553aeec58a 100644 --- a/src/csharp/Grpc.Core/VersionInfo.cs +++ b/src/csharp/Grpc.Core/VersionInfo.cs @@ -48,11 +48,11 @@ namespace Grpc.Core /// <summary> /// Current <c>AssemblyFileVersion</c> of gRPC C# assemblies /// </summary> - public const string CurrentAssemblyFileVersion = "0.16.0.0"; + public const string CurrentAssemblyFileVersion = "1.1.0.0"; /// <summary> /// Current version of gRPC C# /// </summary> - public const string CurrentVersion = "0.16.0-dev"; + public const string CurrentVersion = "1.1.0-dev"; } } diff --git a/src/csharp/Grpc.Core/project.json b/src/csharp/Grpc.Core/project.json index 201e548801..4545d26aa5 100644 --- a/src/csharp/Grpc.Core/project.json +++ b/src/csharp/Grpc.Core/project.json @@ -1,5 +1,5 @@ { - "version": "0.16.0-dev", + "version": "1.1.0-dev", "title": "gRPC C# Core", "authors": [ "Google Inc." ], "copyright": "Copyright 2015, Google Inc.", diff --git a/src/csharp/Grpc.HealthCheck/project.json b/src/csharp/Grpc.HealthCheck/project.json index d9daef720f..0e03e89d6a 100644 --- a/src/csharp/Grpc.HealthCheck/project.json +++ b/src/csharp/Grpc.HealthCheck/project.json @@ -1,5 +1,5 @@ { - "version": "0.16.0-dev", + "version": "1.1.0-dev", "title": "gRPC C# Healthchecking", "authors": [ "Google Inc." ], "copyright": "Copyright 2015, Google Inc.", @@ -22,7 +22,7 @@ } }, "dependencies": { - "Grpc.Core": "0.16.0-dev", + "Grpc.Core": "1.1.0-dev", "Google.Protobuf": "3.0.0-beta3" }, "frameworks": { diff --git a/src/csharp/README.md b/src/csharp/README.md index 86394135c8..18d5945a8a 100644 --- a/src/csharp/README.md +++ b/src/csharp/README.md @@ -23,9 +23,9 @@ HOW TO USE - Open Visual Studio / MonoDevelop / Xamarin Studio and start a new project/solution. -- Add NuGet package `Grpc` as a dependency (Project options -> Manage NuGet Packages). +- Add the [Grpc](https://www.nuget.org/packages/Grpc/) NuGet package as a dependency (Project options -> Manage NuGet Packages). -- To be able to generate code from Protocol Buffer (`.proto`) file definitions, add NuGet package `Grpc.Tools` that contains Protocol Buffers compiler (_protoc_) and the gRPC _protoc_ plugin. +- To be able to generate code from Protocol Buffer (`.proto`) file definitions, add the [Grpc.Tools](https://www.nuget.org/packages/Grpc.Tools/) NuGet package that contains Protocol Buffers compiler (_protoc_) and the gRPC _protoc_ plugin. 
BUILD FROM SOURCE ----------------- diff --git a/src/csharp/build_packages.bat b/src/csharp/build_packages.bat index 272b30f385..f05c0241b6 100644 --- a/src/csharp/build_packages.bat +++ b/src/csharp/build_packages.bat @@ -30,7 +30,7 @@ @rem Builds gRPC NuGet packages @rem Current package versions -set VERSION=0.16.0-dev +set VERSION=1.1.0-dev set PROTOBUF_VERSION=3.0.0-beta3 @rem Packages that depend on prerelease packages (like Google.Protobuf) need to have prerelease suffix as well. diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c index 9b8d050ea5..c670ea65c7 100644 --- a/src/csharp/ext/grpc_csharp_ext.c +++ b/src/csharp/ext/grpc_csharp_ext.c @@ -253,6 +253,7 @@ GPR_EXPORT intptr_t GPR_CALLTYPE grpcsharp_batch_context_recv_message_length( if (!ctx->recv_message) { return -1; } + /* TODO(issue:#7206): check return value of grpc_byte_buffer_reader_init. */ grpc_byte_buffer_reader_init(&reader, ctx->recv_message); return (intptr_t)grpc_byte_buffer_length(reader.buffer_out); } @@ -267,6 +268,7 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_batch_context_recv_message_to_buffer( gpr_slice slice; size_t offset = 0; + /* TODO(issue:#7206): check return value of grpc_byte_buffer_reader_init. */ grpc_byte_buffer_reader_init(&reader, ctx->recv_message); while (grpc_byte_buffer_reader_next(&reader, &slice)) { diff --git a/src/node/ext/byte_buffer.cc b/src/node/ext/byte_buffer.cc index 3479a67702..a3f678f32c 100644 --- a/src/node/ext/byte_buffer.cc +++ b/src/node/ext/byte_buffer.cc @@ -73,7 +73,10 @@ Local<Value> ByteBufferToBuffer(grpc_byte_buffer *buffer) { return scope.Escape(Nan::Null()); } grpc_byte_buffer_reader reader; - grpc_byte_buffer_reader_init(&reader, buffer); + if (!grpc_byte_buffer_reader_init(&reader, buffer)) { + Nan::ThrowError("Error initializing byte buffer reader."); + return scope.Escape(Nan::Undefined()); + } gpr_slice slice = grpc_byte_buffer_reader_readall(&reader); size_t length = GPR_SLICE_LENGTH(slice); char *result = new char[length]; diff --git a/src/node/health_check/LICENSE b/src/node/health_check/LICENSE new file mode 100644 index 0000000000..0209b570e1 --- /dev/null +++ b/src/node/health_check/LICENSE @@ -0,0 +1,28 @@ +Copyright 2015, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/node/health_check/health.js b/src/node/health_check/health.js index 5236683088..64ba9fb960 100644 --- a/src/node/health_check/health.js +++ b/src/node/health_check/health.js @@ -33,14 +33,12 @@ 'use strict'; -var grpc = require('../'); +var grpc = require('grpc'); var _ = require('lodash'); -var health_proto = grpc.load(__dirname + - '/../../proto/grpc/health/v1/health.proto'); - -var HealthClient = health_proto.grpc.health.v1.Health; +var health_messages = require('./v1/health_pb'); +var health_service = require('./v1/health_grpc_pb'); function HealthImplementation(statusMap) { this.statusMap = _.clone(statusMap); @@ -51,17 +49,19 @@ HealthImplementation.prototype.setStatus = function(service, status) { }; HealthImplementation.prototype.check = function(call, callback){ - var service = call.request.service; + var service = call.request.getService(); var status = _.get(this.statusMap, service, null); if (status === null) { callback({code:grpc.status.NOT_FOUND}); } else { - callback(null, {status: status}); + var response = new health_messages.HealthCheckResponse(); + response.setStatus(status); + callback(null, response); } }; module.exports = { - Client: HealthClient, - service: HealthClient.service, + Client: health_service.HealthClient, + service: health_service.HealthService, Implementation: HealthImplementation }; diff --git a/src/node/health_check/node_modules/grpc.js b/src/node/health_check/node_modules/grpc.js new file mode 100644 index 0000000000..42161198cc --- /dev/null +++ b/src/node/health_check/node_modules/grpc.js @@ -0,0 +1,37 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* This exists solely to allow the generated code to import the grpc module + * without using a relative path */ + +module.exports = require('../..'); diff --git a/src/node/health_check/package.json b/src/node/health_check/package.json new file mode 100644 index 0000000000..67f5301df7 --- /dev/null +++ b/src/node/health_check/package.json @@ -0,0 +1,29 @@ +{ + "name": "grpc-health-check", + "version": "1.1.0-dev", + "author": "Google Inc.", + "description": "Health check service for use with gRPC", + "repository": { + "type": "git", + "url": "https://github.com/grpc/grpc.git" + }, + "bugs": "https://github.com/grpc/grpc/issues", + "contributors": [ + { + "name": "Michael Lumish", + "email": "mlumish@google.com" + } + ], + "dependencies": { + "grpc": "^0.15.0", + "lodash": "^3.9.3", + "google-protobuf": "^3.0.0-alpha.5" + }, + "files": [ + "LICENSE", + "health.js", + "v1" + ], + "main": "src/node/index.js", + "license": "BSD-3-Clause" +} diff --git a/src/objective-c/tests/RemoteTestClient/empty.proto b/src/node/health_check/v1/health_grpc_pb.js index a678048289..89bc304e56 100644 --- a/src/objective-c/tests/RemoteTestClient/empty.proto +++ b/src/node/health_check/v1/health_grpc_pb.js @@ -1,3 +1,6 @@ +// GENERATED CODE -- DO NOT EDIT! + +// Original file comments: // Copyright 2015, Google Inc. // All rights reserved. // @@ -26,19 +29,46 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +'use strict'; +var grpc = require('grpc'); +var v1_health_pb = require('../v1/health_pb.js'); -syntax = "proto3"; +function serialize_HealthCheckRequest(arg) { + if (!(arg instanceof v1_health_pb.HealthCheckRequest)) { + throw new Error('Expected argument of type HealthCheckRequest'); + } + return new Buffer(arg.serializeBinary()); +} -package grpc.testing; +function deserialize_HealthCheckRequest(buffer_arg) { + return v1_health_pb.HealthCheckRequest.deserializeBinary(new Uint8Array(buffer_arg)); +} -option objc_class_prefix = "RMT"; +function serialize_HealthCheckResponse(arg) { + if (!(arg instanceof v1_health_pb.HealthCheckResponse)) { + throw new Error('Expected argument of type HealthCheckResponse'); + } + return new Buffer(arg.serializeBinary()); +} -// An empty message that you can re-use to avoid defining duplicated empty -// messages in your project. A typical example is to use it as argument or the -// return value of a service API.
For instance: -// -// service Foo { -// rpc Bar (grpc.testing.Empty) returns (grpc.testing.Empty) { }; -// }; -// -message Empty {} +function deserialize_HealthCheckResponse(buffer_arg) { + return v1_health_pb.HealthCheckResponse.deserializeBinary(new Uint8Array(buffer_arg)); +} + + +var HealthService = exports.HealthService = { + check: { + path: '/grpc.health.v1.Health/Check', + requestStream: false, + responseStream: false, + requestType: v1_health_pb.HealthCheckRequest, + responseType: v1_health_pb.HealthCheckResponse, + requestSerialize: serialize_HealthCheckRequest, + requestDeserialize: deserialize_HealthCheckRequest, + responseSerialize: serialize_HealthCheckResponse, + responseDeserialize: deserialize_HealthCheckResponse, + }, +}; + +exports.HealthClient = grpc.makeGenericClientConstructor(HealthService); diff --git a/src/node/health_check/v1/health_pb.js b/src/node/health_check/v1/health_pb.js new file mode 100644 index 0000000000..b36d47cdbb --- /dev/null +++ b/src/node/health_check/v1/health_pb.js @@ -0,0 +1,342 @@ +/** + * @fileoverview + * @enhanceable + * @public + */ +// GENERATED CODE -- DO NOT EDIT! + +var jspb = require('google-protobuf'); +var goog = jspb; +var global = Function('return this')(); + +goog.exportSymbol('proto.grpc.health.v1.HealthCheckRequest', null, global); +goog.exportSymbol('proto.grpc.health.v1.HealthCheckResponse', null, global); +goog.exportSymbol('proto.grpc.health.v1.HealthCheckResponse.ServingStatus', null, global); + +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.grpc.health.v1.HealthCheckRequest = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.grpc.health.v1.HealthCheckRequest, jspb.Message); +if (goog.DEBUG && !COMPILED) { + proto.grpc.health.v1.HealthCheckRequest.displayName = 'proto.grpc.health.v1.HealthCheckRequest'; +} + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto suitable for use in Soy templates. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * To access a reserved field use, foo.pb_<name>, eg, foo.pb_default. + * For the list of reserved names please see: + * com.google.apps.jspb.JsClassTemplate.JS_RESERVED_WORDS. + * @param {boolean=} opt_includeInstance Whether to include the JSPB instance + * for transitional soy proto support: http://goto/soy-param-migration + * @return {!Object} + */ +proto.grpc.health.v1.HealthCheckRequest.prototype.toObject = function(opt_includeInstance) { + return proto.grpc.health.v1.HealthCheckRequest.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Whether to include the JSPB + * instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.grpc.health.v1.HealthCheckRequest} msg The msg instance to transform. 
+ * @return {!Object} + */ +proto.grpc.health.v1.HealthCheckRequest.toObject = function(includeInstance, msg) { + var f, obj = { + service: msg.getService() + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.grpc.health.v1.HealthCheckRequest} + */ +proto.grpc.health.v1.HealthCheckRequest.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.grpc.health.v1.HealthCheckRequest; + return proto.grpc.health.v1.HealthCheckRequest.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.grpc.health.v1.HealthCheckRequest} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.grpc.health.v1.HealthCheckRequest} + */ +proto.grpc.health.v1.HealthCheckRequest.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.setService(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Class method variant: serializes the given message to binary data + * (in protobuf wire format), writing to the given BinaryWriter. + * @param {!proto.grpc.health.v1.HealthCheckRequest} message + * @param {!jspb.BinaryWriter} writer + */ +proto.grpc.health.v1.HealthCheckRequest.serializeBinaryToWriter = function(message, writer) { + message.serializeBinaryToWriter(writer); +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.grpc.health.v1.HealthCheckRequest.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + this.serializeBinaryToWriter(writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the message to binary data (in protobuf wire format), + * writing to the given BinaryWriter. + * @param {!jspb.BinaryWriter} writer + */ +proto.grpc.health.v1.HealthCheckRequest.prototype.serializeBinaryToWriter = function (writer) { + var f = undefined; + f = this.getService(); + if (f.length > 0) { + writer.writeString( + 1, + f + ); + } +}; + + +/** + * Creates a deep clone of this proto. No data is shared with the original. + * @return {!proto.grpc.health.v1.HealthCheckRequest} The clone. + */ +proto.grpc.health.v1.HealthCheckRequest.prototype.cloneMessage = function() { + return /** @type {!proto.grpc.health.v1.HealthCheckRequest} */ (jspb.Message.cloneMessage(this)); +}; + + +/** + * optional string service = 1; + * @return {string} + */ +proto.grpc.health.v1.HealthCheckRequest.prototype.getService = function() { + return /** @type {string} */ (jspb.Message.getFieldProto3(this, 1, "")); +}; + + +/** @param {string} value */ +proto.grpc.health.v1.HealthCheckRequest.prototype.setService = function(value) { + jspb.Message.setField(this, 1, value); +}; + + + +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. 
+ * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.grpc.health.v1.HealthCheckResponse = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.grpc.health.v1.HealthCheckResponse, jspb.Message); +if (goog.DEBUG && !COMPILED) { + proto.grpc.health.v1.HealthCheckResponse.displayName = 'proto.grpc.health.v1.HealthCheckResponse'; +} + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto suitable for use in Soy templates. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * To access a reserved field use, foo.pb_<name>, eg, foo.pb_default. + * For the list of reserved names please see: + * com.google.apps.jspb.JsClassTemplate.JS_RESERVED_WORDS. + * @param {boolean=} opt_includeInstance Whether to include the JSPB instance + * for transitional soy proto support: http://goto/soy-param-migration + * @return {!Object} + */ +proto.grpc.health.v1.HealthCheckResponse.prototype.toObject = function(opt_includeInstance) { + return proto.grpc.health.v1.HealthCheckResponse.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Whether to include the JSPB + * instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.grpc.health.v1.HealthCheckResponse} msg The msg instance to transform. + * @return {!Object} + */ +proto.grpc.health.v1.HealthCheckResponse.toObject = function(includeInstance, msg) { + var f, obj = { + status: msg.getStatus() + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.grpc.health.v1.HealthCheckResponse} + */ +proto.grpc.health.v1.HealthCheckResponse.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.grpc.health.v1.HealthCheckResponse; + return proto.grpc.health.v1.HealthCheckResponse.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.grpc.health.v1.HealthCheckResponse} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.grpc.health.v1.HealthCheckResponse} + */ +proto.grpc.health.v1.HealthCheckResponse.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {!proto.grpc.health.v1.HealthCheckResponse.ServingStatus} */ (reader.readEnum()); + msg.setStatus(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Class method variant: serializes the given message to binary data + * (in protobuf wire format), writing to the given BinaryWriter. + * @param {!proto.grpc.health.v1.HealthCheckResponse} message + * @param {!jspb.BinaryWriter} writer + */ +proto.grpc.health.v1.HealthCheckResponse.serializeBinaryToWriter = function(message, writer) { + message.serializeBinaryToWriter(writer); +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). 
+ * @return {!Uint8Array} + */ +proto.grpc.health.v1.HealthCheckResponse.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + this.serializeBinaryToWriter(writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the message to binary data (in protobuf wire format), + * writing to the given BinaryWriter. + * @param {!jspb.BinaryWriter} writer + */ +proto.grpc.health.v1.HealthCheckResponse.prototype.serializeBinaryToWriter = function (writer) { + var f = undefined; + f = this.getStatus(); + if (f !== 0.0) { + writer.writeEnum( + 1, + f + ); + } +}; + + +/** + * Creates a deep clone of this proto. No data is shared with the original. + * @return {!proto.grpc.health.v1.HealthCheckResponse} The clone. + */ +proto.grpc.health.v1.HealthCheckResponse.prototype.cloneMessage = function() { + return /** @type {!proto.grpc.health.v1.HealthCheckResponse} */ (jspb.Message.cloneMessage(this)); +}; + + +/** + * optional ServingStatus status = 1; + * @return {!proto.grpc.health.v1.HealthCheckResponse.ServingStatus} + */ +proto.grpc.health.v1.HealthCheckResponse.prototype.getStatus = function() { + return /** @type {!proto.grpc.health.v1.HealthCheckResponse.ServingStatus} */ (jspb.Message.getFieldProto3(this, 1, 0)); +}; + + +/** @param {!proto.grpc.health.v1.HealthCheckResponse.ServingStatus} value */ +proto.grpc.health.v1.HealthCheckResponse.prototype.setStatus = function(value) { + jspb.Message.setField(this, 1, value); +}; + + +/** + * @enum {number} + */ +proto.grpc.health.v1.HealthCheckResponse.ServingStatus = { + UNKNOWN: 0, + SERVING: 1, + NOT_SERVING: 2 +}; + +goog.object.extend(exports, proto.grpc.health.v1); diff --git a/src/node/test/health_test.js b/src/node/test/health_test.js index c93b528d42..efbca46c2d 100644 --- a/src/node/test/health_test.js +++ b/src/node/test/health_test.js @@ -35,15 +35,19 @@ var assert = require('assert'); -var health = require('../health_check/health.js'); +var health = require('../health_check/health'); + +var health_messages = require('../health_check/v1/health_pb'); + +var ServingStatus = health_messages.HealthCheckResponse.ServingStatus; var grpc = require('../'); describe('Health Checking', function() { var statusMap = { - '': 'SERVING', - 'grpc.test.TestServiceNotServing': 'NOT_SERVING', - 'grpc.test.TestServiceServing': 'SERVING' + '': ServingStatus.SERVING, + 'grpc.test.TestServiceNotServing': ServingStatus.NOT_SERVING, + 'grpc.test.TestServiceServing': ServingStatus.SERVING }; var healthServer; var healthImpl; @@ -51,7 +55,7 @@ describe('Health Checking', function() { before(function() { healthServer = new grpc.Server(); healthImpl = new health.Implementation(statusMap); - healthServer.addProtoService(health.service, healthImpl); + healthServer.addService(health.service, healthImpl); var port_num = healthServer.bind('0.0.0.0:0', grpc.ServerCredentials.createInsecure()); healthServer.start(); @@ -62,43 +66,51 @@ describe('Health Checking', function() { healthServer.forceShutdown(); }); it('should say an enabled service is SERVING', function(done) { - healthClient.check({service: ''}, function(err, response) { + var request = new health_messages.HealthCheckRequest(); + request.setService(''); + healthClient.check(request, function(err, response) { assert.ifError(err); - assert.strictEqual(response.status, 'SERVING'); + assert.strictEqual(response.getStatus(), ServingStatus.SERVING); done(); }); }); it('should say that a disabled service is NOT_SERVING', function(done) { - healthClient.check({service: 
'grpc.test.TestServiceNotServing'}, - function(err, response) { - assert.ifError(err); - assert.strictEqual(response.status, 'NOT_SERVING'); - done(); - }); + var request = new health_messages.HealthCheckRequest(); + request.setService('grpc.test.TestServiceNotServing'); + healthClient.check(request, function(err, response) { + assert.ifError(err); + assert.strictEqual(response.getStatus(), ServingStatus.NOT_SERVING); + done(); + }); }); it('should say that an enabled service is SERVING', function(done) { - healthClient.check({service: 'grpc.test.TestServiceServing'}, - function(err, response) { - assert.ifError(err); - assert.strictEqual(response.status, 'SERVING'); - done(); - }); + var request = new health_messages.HealthCheckRequest(); + request.setService('grpc.test.TestServiceServing'); + healthClient.check(request, function(err, response) { + assert.ifError(err); + assert.strictEqual(response.getStatus(), ServingStatus.SERVING); + done(); + }); }); it('should get NOT_FOUND if the service is not registered', function(done) { - healthClient.check({service: 'not_registered'}, function(err, response) { + var request = new health_messages.HealthCheckRequest(); + request.setService('not_registered'); + healthClient.check(request, function(err, response) { assert(err); assert.strictEqual(err.code, grpc.status.NOT_FOUND); done(); }); }); it('should get a different response if the status changes', function(done) { - healthClient.check({service: 'transient'}, function(err, response) { + var request = new health_messages.HealthCheckRequest(); + request.setService('transient'); + healthClient.check(request, function(err, response) { assert(err); assert.strictEqual(err.code, grpc.status.NOT_FOUND); - healthImpl.setStatus('transient', 'SERVING'); - healthClient.check({service: 'transient'}, function(err, response) { + healthImpl.setStatus('transient', ServingStatus.SERVING); + healthClient.check(request, function(err, response) { assert.ifError(err); - assert.strictEqual(response.status, 'SERVING'); + assert.strictEqual(response.getStatus(), ServingStatus.SERVING); done(); }); }); diff --git a/src/node/tools/package.json b/src/node/tools/package.json index 7c256d7ba0..e5513d7879 100644 --- a/src/node/tools/package.json +++ b/src/node/tools/package.json @@ -1,6 +1,6 @@ { "name": "grpc-tools", - "version": "0.16.0-dev", + "version": "1.1.0-dev", "author": "Google Inc.", "description": "Tools for developing with gRPC on Node.js", "homepage": "http://www.grpc.io/", diff --git a/src/objective-c/BoringSSL.podspec b/src/objective-c/BoringSSL.podspec index 7d1de80716..42b4434d0d 100644 --- a/src/objective-c/BoringSSL.podspec +++ b/src/objective-c/BoringSSL.podspec @@ -31,7 +31,8 @@ Pod::Spec.new do |s| s.name = 'BoringSSL' - s.version = '3.0' + version = '4.0' + s.version = version s.summary = 'BoringSSL is a fork of OpenSSL that is designed to meet Google’s needs.' # Adapted from the homepage: s.description = <<-DESC @@ -67,31 +68,139 @@ Pod::Spec.new do |s| s.authors = 'Adam Langley', 'David Benjamin', 'Matt Braithwaite' s.source = { :git => 'https://boringssl.googlesource.com/boringssl', - :tag => 'version_for_cocoapods_3.0' } + :tag => "version_for_cocoapods_#{version}" } - s.source_files = 'ssl/*.{h,c}', - 'ssl/**/*.{h,c}', - '*.{h,c}', - 'crypto/*.{h,c}', - 'crypto/**/*.{h,c}', - 'include/openssl/*.h' + name = 'openssl' - s.public_header_files = 'include/openssl/*.h' - s.header_mappings_dir = 'include' + # When creating a dynamic framework, name it openssl.framework instead of BoringSSL.framework. 
+ # This lets users write their includes like `#include <openssl/ssl.h>` as opposed to `#include + # <BoringSSL/ssl.h>`. + s.module_name = name - s.exclude_files = "**/*_test.*" + # When creating a dynamic framework, copy the headers under `include/openssl/` into the root of + # the `Headers/` directory of the framework (i.e., not under `Headers/include/openssl`). + # + # TODO(jcanizales): Debug why this doesn't work on macOS. + s.header_mappings_dir = 'include/openssl' + + # The above has an undesired effect when creating a static library: It forces users to write + # includes like `#include <BoringSSL/ssl.h>`. `s.header_dir` adds a path prefix to that, and + # because Cocoapods lets you omit the pod name when including headers of static libraries, the + # following lets users write `#include <openssl/ssl.h>`. + s.header_dir = name + + # The module map and umbrella header created automatically by Cocoapods don't work for C libraries + # like this one. The following file, and a correct umbrella header, are created on the fly by the + # `prepare_command` of this pod. + s.module_map = 'include/openssl/module.modulemap' # We don't need to inhibit all warnings; only -Wno-shorten-64-to-32. But Cocoapods' linter doesn't # want that for some reason. s.compiler_flags = '-DOPENSSL_NO_ASM', '-GCC_WARN_INHIBIT_ALL_WARNINGS', '-w' s.requires_arc = false + # Like many other C libraries, BoringSSL has its public headers under `include/<libname>/` and its + # sources and private headers in other directories outside `include/`. Cocoapods' linter doesn't + # allow any header to be listed outside the `header_mappings_dir` (even though doing so works in + # practice). Because we need our `header_mappings_dir` to be `include/openssl/` for the reason + # mentioned above, we work around the linter limitation by dividing the pod into two subspecs, one + # for public headers and the other for implementation. Each gets its own `header_mappings_dir`, + # making the linter happy. + s.subspec 'Interface' do |ss| + ss.header_mappings_dir = 'include/openssl' + ss.source_files = 'include/openssl/*.h' + end + s.subspec 'Implementation' do |ss| + ss.header_mappings_dir = '.' + ss.source_files = 'ssl/*.{h,c}', + 'ssl/**/*.{h,c}', + '*.{h,c}', + 'crypto/*.{h,c}', + 'crypto/**/*.{h,c}' + ss.private_header_files = 'ssl/*.h', + 'ssl/**/*.h', + '*.h', + 'crypto/*.h', + 'crypto/**/*.h' + ss.exclude_files = '**/*_test.*', + '**/test_*.*', + '**/test/*.*' + + ss.dependency "#{s.name}/Interface", version + end + s.prepare_command = <<-END_OF_COMMAND # Replace "const BIGNUM *I" in rsa.h with a lowercase i, as the former fails when including # OpenSSL in a Swift bridging header (complex.h defines "I", and it's as if the compiler # included it in every bridged header). sed -E -i '.back' 's/\\*I,/*i,/g' include/openssl/rsa.h + # Replace `#include "../crypto/internal.h"` in e_tls.c with `#include "../internal.h"`. The + # former assumes crypto/ is in the headers search path, which is hard to enforce when using + # dynamic frameworks. The latter always works, being relative to the current file. + sed -E -i '.back' 's/crypto\\///g' crypto/cipher/e_tls.c + + # Add a module map and an umbrella header + cat > include/openssl/umbrella.h <<EOF + #include "ssl.h" + #include "crypto.h" + #include "aes.h" + /* The following macros are defined by base.h. The latter is the first file included by the other headers.
*/ + #if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) + # include "arm_arch.h" + #endif + #include "asn1.h" + #include "asn1_mac.h" + #include "asn1t.h" + #include "blowfish.h" + #include "cast.h" + #include "chacha.h" + #include "cmac.h" + #include "conf.h" + #include "cpu.h" + #include "curve25519.h" + #include "des.h" + #include "dtls1.h" + #include "hkdf.h" + #include "md4.h" + #include "md5.h" + #include "newhope.h" + #include "obj_mac.h" + #include "objects.h" + #include "opensslv.h" + #include "ossl_typ.h" + #include "pkcs12.h" + #include "pkcs7.h" + #include "pkcs8.h" + #include "poly1305.h" + #include "rand.h" + #include "rc4.h" + #include "ripemd.h" + #include "safestack.h" + #include "srtp.h" + #include "time_support.h" + #include "x509.h" + #include "x509v3.h" + EOF + cat > include/openssl/module.modulemap <<EOF + framework module openssl { + umbrella header "umbrella.h" + export * + module * { export * } + } + EOF + + # #include <inttypes.h> fails to compile when building a dynamic framework. libgit2 in + # https://github.com/libgit2/libgit2/commit/1ddada422caf8e72ba97dca2568d2bf879fed5f2 and libvpx + # in https://chromium.googlesource.com/webm/libvpx/+/1bec0c5a7e885ec792f6bb658eb3f34ad8f37b15 + # work around it by removing the include. We need four of its macros, so we expand them here. + sed -E -i '.back' '/<inttypes.h>/d' include/openssl/bn.h + sed -E -i '.back' 's/PRIu32/"u"/g' include/openssl/bn.h + sed -E -i '.back' 's/PRIx32/"x"/g' include/openssl/bn.h + sed -E -i '.back' 's/PRIu64/"llu"/g' include/openssl/bn.h + sed -E -i '.back' 's/PRIx64/"llx"/g' include/openssl/bn.h + # This is a bit ridiculous, but requiring people to install Go in order to build is slightly # more ridiculous IMO. To save you from scrolling, this is the last part of the podspec. # TODO(jcanizales): Translate err_data_generate.go into a Bash or Ruby script. @@ -110,7 +219,7 @@ Pod::Spec.new do |s| * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - /* This file was generated by err_data_generate.go. */ + /* This file was generated by err_data_generate.go. 
*/ #include <openssl/base.h> #include <openssl/err.h> @@ -152,178 +261,166 @@ Pod::Spec.new do |s| OPENSSL_COMPILE_ASSERT(ERR_NUM_LIBS == 33, library_values_changed_num); const uint32_t kOpenSSLReasonValues[] = { - 0xc3207ba, - 0xc3287d4, - 0xc3307e3, - 0xc3387f3, - 0xc340802, - 0xc34881b, - 0xc350827, - 0xc358844, - 0xc360856, - 0xc368864, - 0xc370874, - 0xc378881, - 0xc380891, - 0xc38889c, - 0xc3908b2, - 0xc3988c1, - 0xc3a08d5, - 0xc3a87c7, - 0xc3b00b0, - 0x10321478, - 0x10329484, - 0x1033149d, - 0x103394b0, - 0x10340de1, - 0x103494cf, - 0x103514e4, - 0x10359516, - 0x1036152f, - 0x10369544, - 0x10371562, - 0x10379571, - 0x1038158d, - 0x103895a8, - 0x103915b7, - 0x103995d3, - 0x103a15ee, - 0x103a9605, - 0x103b1616, - 0x103b962a, - 0x103c1649, - 0x103c9658, - 0x103d166f, - 0x103d9682, - 0x103e0b6c, - 0x103e96b3, - 0x103f16c6, - 0x103f96e0, - 0x104016f0, - 0x10409704, - 0x1041171a, - 0x10419732, - 0x10421747, - 0x1042975b, - 0x1043176d, - 0x104385d0, - 0x104408c1, - 0x10449782, - 0x10451799, - 0x104597ae, - 0x104617bc, - 0x10469695, - 0x104714f7, - 0x104787c7, - 0x104800b0, - 0x104894c3, - 0x14320b4f, - 0x14328b5d, - 0x14330b6c, - 0x14338b7e, + 0xc320838, + 0xc328852, + 0xc330861, + 0xc338871, + 0xc340880, + 0xc348899, + 0xc3508a5, + 0xc3588c2, + 0xc3608d4, + 0xc3688e2, + 0xc3708f2, + 0xc3788ff, + 0xc38090f, + 0xc38891a, + 0xc390930, + 0xc39893f, + 0xc3a0953, + 0xc3a8845, + 0xc3b00ea, + 0x10320845, + 0x103293ab, + 0x103313b7, + 0x103393d0, + 0x103413e3, + 0x10348e8b, + 0x10350c19, + 0x103593f6, + 0x1036140b, + 0x1036941e, + 0x1037143d, + 0x10379456, + 0x1038146b, + 0x10389489, + 0x10391498, + 0x103994b4, + 0x103a14cf, + 0x103a94de, + 0x103b14fa, + 0x103b9515, + 0x103c152c, + 0x103c80ea, + 0x103d153d, + 0x103d9551, + 0x103e1570, + 0x103e957f, + 0x103f1596, + 0x103f95a9, + 0x10400bea, + 0x104095bc, + 0x104115da, + 0x104195ed, + 0x10421607, + 0x10429617, + 0x1043162b, + 0x10439641, + 0x10441659, + 0x1044966e, + 0x10451682, + 0x10459694, + 0x104605fb, + 0x1046893f, + 0x104716a9, + 0x104796c0, + 0x104816d5, + 0x104896e3, + 0x14320bcd, + 0x14328bdb, + 0x14330bea, + 0x14338bfc, + 0x143400ac, + 0x143480ea, 0x18320083, - 0x18328e47, - 0x18340e75, - 0x18348e89, - 0x18358ec0, - 0x18368eed, - 0x18370f00, - 0x18378f14, - 0x18380f38, - 0x18388f46, - 0x18390f5c, - 0x18398f70, - 0x183a0f80, - 0x183b0f90, - 0x183b8fa5, - 0x183c8fd0, - 0x183d0fe4, - 0x183d8ff4, - 0x183e0b9b, - 0x183e9001, - 0x183f1013, - 0x183f901e, - 0x1840102e, - 0x1840903f, - 0x18411050, - 0x18419062, - 0x1842108b, - 0x184290bd, - 0x184310cc, - 0x18451135, - 0x1845914b, - 0x18461166, - 0x18468ed8, - 0x184709d9, - 0x18478094, - 0x18480fbc, - 0x18489101, - 0x18490e5d, - 0x18498e9e, - 0x184a119c, - 0x184a9119, - 0x184b10e0, - 0x184b8e37, - 0x184c10a4, - 0x184c866b, - 0x184d1181, - 0x203211c3, - 0x243211cf, - 0x24328907, - 0x243311e1, - 0x243391ee, - 0x243411fb, - 0x2434920d, - 0x2435121c, - 0x24359239, - 0x24361246, - 0x24369254, - 0x24371262, - 0x24379270, - 0x24381279, - 0x24389286, - 0x24391299, - 0x28320b8f, - 0x28328b9b, - 0x28330b6c, - 0x28338bae, - 0x2c322c0b, - 0x2c32ac19, - 0x2c332c2b, - 0x2c33ac3d, - 0x2c342c51, - 0x2c34ac63, - 0x2c352c7e, - 0x2c35ac90, - 0x2c362ca3, - 0x2c3682f3, - 0x2c372cb0, - 0x2c37acc2, - 0x2c382cd5, - 0x2c38ace3, - 0x2c392cf3, - 0x2c39ad05, - 0x2c3a2d19, - 0x2c3aad2a, - 0x2c3b1359, - 0x2c3bad3b, - 0x2c3c2d4f, - 0x2c3cad65, - 0x2c3d2d7e, - 0x2c3dadac, - 0x2c3e2dba, - 0x2c3eadd2, - 0x2c3f2dea, - 0x2c3fadf7, - 0x2c402e1a, - 0x2c40ae39, - 0x2c4111c3, - 0x2c41ae4a, - 0x2c422e5d, - 0x2c429135, - 0x2c432e6e, - 
0x2c4386a2, - 0x2c442d9b, + 0x18328ee1, + 0x183300ac, + 0x18338ef7, + 0x18340f0b, + 0x183480ea, + 0x18350f20, + 0x18358f38, + 0x18360f4d, + 0x18368f61, + 0x18370f85, + 0x18378f9b, + 0x18380faf, + 0x18388fbf, + 0x18390a57, + 0x18398fcf, + 0x183a0fe4, + 0x183a8ff8, + 0x183b0c25, + 0x183b9005, + 0x183c1017, + 0x183c9022, + 0x183d1032, + 0x183d9043, + 0x183e1054, + 0x183e9066, + 0x183f108f, + 0x183f90a8, + 0x184010c0, + 0x184086d3, + 0x203210e7, + 0x243210f3, + 0x24328985, + 0x24331105, + 0x24339112, + 0x2434111f, + 0x24349131, + 0x24351140, + 0x2435915d, + 0x2436116a, + 0x24369178, + 0x24371186, + 0x24379194, + 0x2438119d, + 0x243891aa, + 0x243911bd, + 0x28320c0d, + 0x28328c25, + 0x28330bea, + 0x28338c38, + 0x28340c19, + 0x283480ac, + 0x283500ea, + 0x2c322775, + 0x2c32a783, + 0x2c332795, + 0x2c33a7a7, + 0x2c3427bb, + 0x2c34a7cd, + 0x2c3527e8, + 0x2c35a7fa, + 0x2c36280d, + 0x2c36832d, + 0x2c37281a, + 0x2c37a82c, + 0x2c38283f, + 0x2c38a856, + 0x2c392864, + 0x2c39a874, + 0x2c3a2886, + 0x2c3aa89a, + 0x2c3b28ab, + 0x2c3ba8ca, + 0x2c3c28de, + 0x2c3ca8f4, + 0x2c3d290d, + 0x2c3da92a, + 0x2c3e293b, + 0x2c3ea949, + 0x2c3f2961, + 0x2c3fa979, + 0x2c402986, + 0x2c4090e7, + 0x2c412997, + 0x2c41a9aa, + 0x2c4210c0, + 0x2c42a9bb, + 0x2c430720, + 0x2c43a8bc, 0x30320000, 0x30328015, 0x3033001f, @@ -333,479 +430,451 @@ Pod::Spec.new do |s| 0x3035006b, 0x30358083, 0x30360094, - 0x303680a1, - 0x303700b0, - 0x303780bd, - 0x303800d0, - 0x303880eb, - 0x30390100, - 0x30398114, - 0x303a0128, - 0x303a8139, - 0x303b0152, - 0x303b816f, - 0x303c017d, - 0x303c8191, - 0x303d01a1, - 0x303d81ba, - 0x303e01ca, - 0x303e81dd, - 0x303f01ec, - 0x303f81f8, - 0x3040020d, - 0x3040821d, - 0x30410234, - 0x30418241, - 0x30420254, - 0x30428263, - 0x30430278, - 0x30438299, - 0x304402ac, - 0x304482bf, - 0x304502d8, - 0x304582f3, - 0x30460310, - 0x30468329, - 0x30470337, - 0x30478348, - 0x30480357, - 0x3048836f, - 0x30490381, - 0x30498395, - 0x304a03b4, - 0x304a83c7, - 0x304b03d2, - 0x304b83e1, - 0x304c03f2, - 0x304c83fe, - 0x304d0414, - 0x304d8422, - 0x304e0438, - 0x304e844a, - 0x304f045c, - 0x304f846f, - 0x30500482, - 0x30508493, - 0x305104a3, - 0x305184bb, - 0x305204d0, - 0x305284e8, - 0x305304fc, - 0x30538514, - 0x3054052d, - 0x30548546, - 0x30550563, - 0x3055856e, - 0x30560586, - 0x30568596, - 0x305705a7, - 0x305785ba, - 0x305805d0, - 0x305885d9, - 0x305905ee, - 0x30598601, - 0x305a0610, - 0x305a8630, - 0x305b063f, - 0x305b864b, - 0x305c066b, - 0x305c8687, - 0x305d0698, - 0x305d86a2, - 0x34320ac9, - 0x34328add, - 0x34330afa, - 0x34338b0d, - 0x34340b1c, - 0x34348b39, + 0x303680ac, + 0x303700b9, + 0x303780c8, + 0x303800ea, + 0x303880f7, + 0x3039010a, + 0x30398125, + 0x303a013a, + 0x303a814e, + 0x303b0162, + 0x303b8173, + 0x303c018c, + 0x303c81a9, + 0x303d01b7, + 0x303d81cb, + 0x303e01db, + 0x303e81f4, + 0x303f0204, + 0x303f8217, + 0x30400226, + 0x30408232, + 0x30410247, + 0x30418257, + 0x3042026e, + 0x3042827b, + 0x3043028e, + 0x3043829d, + 0x304402b2, + 0x304482d3, + 0x304502e6, + 0x304582f9, + 0x30460312, + 0x3046832d, + 0x3047034a, + 0x30478363, + 0x30480371, + 0x30488382, + 0x30490391, + 0x304983a9, + 0x304a03bb, + 0x304a83cf, + 0x304b03ee, + 0x304b8401, + 0x304c040c, + 0x304c841d, + 0x304d0429, + 0x304d843f, + 0x304e044d, + 0x304e8463, + 0x304f0475, + 0x304f8487, + 0x3050049a, + 0x305084ad, + 0x305104be, + 0x305184ce, + 0x305204e6, + 0x305284fb, + 0x30530513, + 0x30538527, + 0x3054053f, + 0x30548558, + 0x30550571, + 0x3055858e, + 0x30560599, + 0x305685b1, + 0x305705c1, + 0x305785d2, + 0x305805e5, + 0x305885fb, + 0x30590604, + 
0x30598619, + 0x305a062c, + 0x305a863b, + 0x305b065b, + 0x305b866a, + 0x305c068b, + 0x305c86a7, + 0x305d06b3, + 0x305d86d3, + 0x305e06ef, + 0x305e8700, + 0x305f0716, + 0x305f8720, + 0x34320b47, + 0x34328b5b, + 0x34330b78, + 0x34338b8b, + 0x34340b9a, + 0x34348bb7, 0x3c320083, - 0x3c328bd8, - 0x3c330bf1, - 0x3c338c0c, - 0x3c340c29, - 0x3c348c44, - 0x3c350c5f, - 0x3c358c74, - 0x3c360c8d, - 0x3c368ca5, - 0x3c370cb6, - 0x3c378cc4, - 0x3c380cd1, - 0x3c388ce5, - 0x3c390b9b, - 0x3c398cf9, - 0x3c3a0d0d, - 0x3c3a8881, - 0x3c3b0d1d, - 0x3c3b8d38, - 0x3c3c0d4a, - 0x3c3c8d60, - 0x3c3d0d6a, - 0x3c3d8d7e, - 0x3c3e0d8c, - 0x3c3e8db1, - 0x3c3f0bc4, - 0x3c3f8d9a, - 0x403217d3, - 0x403297e9, - 0x40331817, - 0x40339821, - 0x40341838, - 0x40349856, - 0x40351866, - 0x40359878, - 0x40361885, - 0x40369891, - 0x403718a6, - 0x403798bb, - 0x403818cd, - 0x403898d8, - 0x403918ea, - 0x40398de1, - 0x403a18fa, - 0x403a990d, - 0x403b192e, - 0x403b993f, - 0x403c194f, - 0x403c8064, - 0x403d195b, - 0x403d9977, - 0x403e198d, - 0x403e999c, - 0x403f19af, - 0x403f99c9, - 0x404019d7, - 0x404099ec, - 0x40411a00, - 0x40419a1d, - 0x40421a36, - 0x40429a51, - 0x40431a6a, - 0x40439a7d, - 0x40441a91, - 0x40449aa9, - 0x40451af4, - 0x40459b02, - 0x40461b20, - 0x40468094, - 0x40471b35, - 0x40479b47, - 0x40481b6b, - 0x40489b99, - 0x40491bad, - 0x40499bc2, - 0x404a1bdb, - 0x404a9c15, - 0x404b1c46, - 0x404b9c7c, - 0x404c1c97, - 0x404c9cb1, - 0x404d1cc8, - 0x404d9cf0, - 0x404e1d07, - 0x404e9d23, - 0x404f1d3f, - 0x404f9d60, - 0x40501d82, - 0x40509d9e, - 0x40511db2, - 0x40519dbf, - 0x40521dd6, - 0x40529de6, - 0x40531df6, - 0x40539e0a, - 0x40541e25, - 0x40549e35, - 0x40551e4c, - 0x40559e5b, - 0x40561e88, - 0x40569ea0, - 0x40571ebc, - 0x40579ed5, - 0x40581ee8, - 0x40589efd, - 0x40591f20, - 0x40599f4b, - 0x405a1f58, - 0x405a9f71, - 0x405b1f89, - 0x405b9f9c, - 0x405c1fb1, - 0x405c9fc3, - 0x405d1fd8, - 0x405d9fe8, - 0x405e2001, - 0x405ea015, - 0x405f2025, - 0x405fa03d, - 0x4060204e, - 0x4060a061, - 0x40612072, - 0x4061a090, - 0x406220a1, - 0x4062a0ae, - 0x406320c5, - 0x4063a106, - 0x4064211d, - 0x4064a12a, - 0x40652138, - 0x4065a15a, - 0x40662182, - 0x4066a197, - 0x406721ae, - 0x4067a1bf, - 0x406821d0, - 0x4068a1e1, - 0x406921f6, - 0x4069a20d, - 0x406a221e, - 0x406aa237, - 0x406b2252, - 0x406ba269, - 0x406c22d6, - 0x406ca2f7, - 0x406d230a, - 0x406da32b, - 0x406e2346, - 0x406ea38f, - 0x406f23b0, - 0x406fa3d6, - 0x407023f6, - 0x4070a412, - 0x4071259f, - 0x4071a5c2, - 0x407225d8, - 0x4072a5f7, - 0x4073260f, - 0x4073a62f, - 0x40742859, - 0x4074a87e, - 0x40752899, - 0x4075a8b8, - 0x407628e7, - 0x4076a90f, - 0x40772940, - 0x4077a95f, - 0x40782999, - 0x4078a9b0, - 0x407929c3, - 0x4079a9e0, - 0x407a0782, - 0x407aa9f2, - 0x407b2a05, - 0x407baa1e, - 0x407c2a36, - 0x407c90bd, - 0x407d2a4a, - 0x407daa64, - 0x407e2a75, - 0x407eaa89, - 0x407f2a97, - 0x407faab2, - 0x40801286, - 0x4080aad7, - 0x40812af9, - 0x4081ab14, - 0x40822b29, - 0x4082ab41, - 0x40832b59, - 0x4083ab70, - 0x40842b86, - 0x4084ab92, - 0x40852ba5, - 0x4085abba, - 0x40862bcc, - 0x4086abe1, - 0x40872bea, - 0x40879cde, - 0x40880083, - 0x4088a0e5, - 0x40890a17, - 0x4089a281, - 0x408a1bfe, - 0x408aa2ab, - 0x408b2928, - 0x408ba984, - 0x408c2361, - 0x408c9c2f, - 0x408d1c64, - 0x408d9e76, - 0x408e1ab9, - 0x408e9add, - 0x408f1f2e, - 0x408f9b8b, - 0x41f424ca, - 0x41f9255c, - 0x41fe244f, - 0x41fea680, - 0x41ff2771, - 0x420324e3, - 0x42082505, - 0x4208a541, - 0x42092433, - 0x4209a57b, - 0x420a248a, - 0x420aa46a, - 0x420b24aa, - 0x420ba523, - 0x420c278d, - 0x420ca64d, - 0x420d2667, - 0x420da69e, - 0x421226b8, - 
0x42172754, - 0x4217a6fa, - 0x421c271c, - 0x421f26d7, - 0x422127a4, - 0x42262737, - 0x422b283d, - 0x422ba806, - 0x422c2825, - 0x422ca7e0, - 0x422d27bf, - 0x443206ad, - 0x443286bc, - 0x443306c8, - 0x443386d6, - 0x443406e9, - 0x443486fa, - 0x44350701, - 0x4435870b, - 0x4436071e, - 0x44368734, - 0x44370746, - 0x44378753, - 0x44380762, - 0x4438876a, - 0x44390782, - 0x44398790, - 0x443a07a3, - 0x4c3212b0, - 0x4c3292c0, - 0x4c3312d3, - 0x4c3392f3, - 0x4c340094, - 0x4c3480b0, - 0x4c3512ff, - 0x4c35930d, - 0x4c361329, - 0x4c36933c, - 0x4c37134b, - 0x4c379359, - 0x4c38136e, - 0x4c38937a, - 0x4c39139a, - 0x4c3993c4, - 0x4c3a13dd, - 0x4c3a93f6, - 0x4c3b05d0, - 0x4c3b940f, - 0x4c3c1421, - 0x4c3c9430, - 0x4c3d10bd, - 0x4c3d9449, - 0x4c3e1456, - 0x50322e80, - 0x5032ae8f, - 0x50332e9a, - 0x5033aeaa, - 0x50342ec3, - 0x5034aedd, - 0x50352eeb, - 0x5035af01, - 0x50362f13, - 0x5036af29, - 0x50372f42, - 0x5037af55, - 0x50382f6d, - 0x5038af7e, - 0x50392f93, - 0x5039afa7, - 0x503a2fc7, - 0x503aafdd, - 0x503b2ff5, - 0x503bb007, - 0x503c3023, - 0x503cb03a, - 0x503d3053, - 0x503db069, - 0x503e3076, - 0x503eb08c, - 0x503f309e, - 0x503f8348, - 0x504030b1, - 0x5040b0c1, - 0x504130db, - 0x5041b0ea, - 0x50423104, - 0x5042b121, - 0x50433131, - 0x5043b141, - 0x50443150, - 0x50448414, - 0x50453164, - 0x5045b182, - 0x50463195, - 0x5046b1ab, - 0x504731bd, - 0x5047b1d2, - 0x504831f8, - 0x5048b206, - 0x50493219, - 0x5049b22e, - 0x504a3244, - 0x504ab254, - 0x504b3274, - 0x504bb287, - 0x504c32aa, - 0x504cb2d8, - 0x504d32ea, - 0x504db307, - 0x504e3322, - 0x504eb33e, - 0x504f3350, - 0x504fb367, - 0x50503376, - 0x50508687, - 0x50513389, - 0x58320e1f, - 0x68320de1, - 0x68328b9b, - 0x68330bae, - 0x68338def, - 0x68340dff, - 0x683480b0, - 0x6c320dbd, - 0x6c328b7e, - 0x6c330dc8, - 0x7432098d, - 0x783208f2, - 0x78328907, - 0x78330913, + 0x3c328c62, + 0x3c330c7b, + 0x3c338c96, + 0x3c340cb3, + 0x3c348cdd, + 0x3c350cf8, + 0x3c358d1e, + 0x3c360d37, + 0x3c368d4f, + 0x3c370d60, + 0x3c378d6e, + 0x3c380d7b, + 0x3c388d8f, + 0x3c390c25, + 0x3c398da3, + 0x3c3a0db7, + 0x3c3a88ff, + 0x3c3b0dc7, + 0x3c3b8de2, + 0x3c3c0df4, + 0x3c3c8e0a, + 0x3c3d0e14, + 0x3c3d8e28, + 0x3c3e0e36, + 0x3c3e8e5b, + 0x3c3f0c4e, + 0x3c3f8e44, + 0x3c4000ac, + 0x3c4080ea, + 0x3c410cce, + 0x3c418d0d, + 0x403216fa, + 0x40329710, + 0x4033173e, + 0x40339748, + 0x4034175f, + 0x4034977d, + 0x4035178d, + 0x4035979f, + 0x403617ac, + 0x403697b8, + 0x403717cd, + 0x403797df, + 0x403817ea, + 0x403897fc, + 0x40390e8b, + 0x4039980c, + 0x403a181f, + 0x403a9840, + 0x403b1851, + 0x403b9861, + 0x403c0064, + 0x403c8083, + 0x403d186d, + 0x403d9883, + 0x403e1892, + 0x403e98a5, + 0x403f18bf, + 0x403f98cd, + 0x404018e2, + 0x404098f6, + 0x40411913, + 0x4041992e, + 0x40421947, + 0x4042995a, + 0x4043196e, + 0x40439986, + 0x4044199d, + 0x404480ac, + 0x404519b2, + 0x404599c4, + 0x404619e8, + 0x40469a08, + 0x40471a16, + 0x40479a2a, + 0x40481a3f, + 0x40489a58, + 0x40491a6f, + 0x40499a89, + 0x404a1aa0, + 0x404a9abe, + 0x404b1ad6, + 0x404b9aed, + 0x404c1b03, + 0x404c9b15, + 0x404d1b36, + 0x404d9b58, + 0x404e1b6c, + 0x404e9b79, + 0x404f1b90, + 0x404f9ba0, + 0x40501bca, + 0x40509bde, + 0x40511bf9, + 0x40519c09, + 0x40521c20, + 0x40529c32, + 0x40531c4a, + 0x40539c5d, + 0x40541c72, + 0x40549c95, + 0x40551ca3, + 0x40559cc0, + 0x40561ccd, + 0x40569ce6, + 0x40571cfe, + 0x40579d11, + 0x40581d26, + 0x40589d38, + 0x40591d48, + 0x40599d61, + 0x405a1d75, + 0x405a9d85, + 0x405b1d9d, + 0x405b9dae, + 0x405c1dc1, + 0x405c9dd2, + 0x405d1ddf, + 0x405d9df6, + 0x405e1e16, + 0x405e8a95, + 0x405f1e37, + 0x405f9e44, + 0x40601e52, 
+ 0x40609e74, + 0x40611e9c, + 0x40619eb1, + 0x40621ec8, + 0x40629ed9, + 0x40631eea, + 0x40639eff, + 0x40641f16, + 0x40649f27, + 0x40651f42, + 0x40659f59, + 0x40661f71, + 0x40669f9b, + 0x40671fc6, + 0x40679fe7, + 0x40681ffa, + 0x4068a01b, + 0x4069204d, + 0x4069a07b, + 0x406a209c, + 0x406aa0bc, + 0x406b2244, + 0x406ba267, + 0x406c227d, + 0x406ca4a9, + 0x406d24d8, + 0x406da500, + 0x406e2519, + 0x406ea531, + 0x406f2550, + 0x406fa565, + 0x40702578, + 0x4070a595, + 0x40710800, + 0x4071a5a7, + 0x407225ba, + 0x4072a5d3, + 0x407325eb, + 0x4073936d, + 0x407425ff, + 0x4074a619, + 0x4075262a, + 0x4075a63e, + 0x4076264c, + 0x407691aa, + 0x40772671, + 0x4077a693, + 0x407826ae, + 0x4078a6c3, + 0x407926da, + 0x4079a6f0, + 0x407a26fc, + 0x407aa70f, + 0x407b2724, + 0x407ba736, + 0x407c274b, + 0x407ca754, + 0x407d2036, + 0x407d9bb0, + 0x41f4216f, + 0x41f92201, + 0x41fe20f4, + 0x41fea2d0, + 0x41ff23c1, + 0x42032188, + 0x420821aa, + 0x4208a1e6, + 0x420920d8, + 0x4209a220, + 0x420a212f, + 0x420aa10f, + 0x420b214f, + 0x420ba1c8, + 0x420c23dd, + 0x420ca29d, + 0x420d22b7, + 0x420da2ee, + 0x42122308, + 0x421723a4, + 0x4217a34a, + 0x421c236c, + 0x421f2327, + 0x422123f4, + 0x42262387, + 0x422b248d, + 0x422ba456, + 0x422c2475, + 0x422ca430, + 0x422d240f, + 0x4432072b, + 0x4432873a, + 0x44330746, + 0x44338754, + 0x44340767, + 0x44348778, + 0x4435077f, + 0x44358789, + 0x4436079c, + 0x443687b2, + 0x443707c4, + 0x443787d1, + 0x443807e0, + 0x443887e8, + 0x44390800, + 0x4439880e, + 0x443a0821, + 0x4c3211d4, + 0x4c3291e4, + 0x4c3311f7, + 0x4c339217, + 0x4c3400ac, + 0x4c3480ea, + 0x4c351223, + 0x4c359231, + 0x4c36124d, + 0x4c369260, + 0x4c37126f, + 0x4c37927d, + 0x4c381292, + 0x4c38929e, + 0x4c3912be, + 0x4c3992e8, + 0x4c3a1301, + 0x4c3a931a, + 0x4c3b05fb, + 0x4c3b9333, + 0x4c3c1345, + 0x4c3c9354, + 0x4c3d136d, + 0x4c3d937c, + 0x4c3e1389, + 0x503229cd, + 0x5032a9dc, + 0x503329e7, + 0x5033a9f7, + 0x50342a10, + 0x5034aa2a, + 0x50352a38, + 0x5035aa4e, + 0x50362a60, + 0x5036aa76, + 0x50372a8f, + 0x5037aaa2, + 0x50382aba, + 0x5038aacb, + 0x50392ae0, + 0x5039aaf4, + 0x503a2b14, + 0x503aab2a, + 0x503b2b42, + 0x503bab54, + 0x503c2b70, + 0x503cab87, + 0x503d2ba0, + 0x503dabb6, + 0x503e2bc3, + 0x503eabd9, + 0x503f2beb, + 0x503f8382, + 0x50402bfe, + 0x5040ac0e, + 0x50412c28, + 0x5041ac37, + 0x50422c51, + 0x5042ac6e, + 0x50432c7e, + 0x5043ac8e, + 0x50442c9d, + 0x5044843f, + 0x50452cb1, + 0x5045accf, + 0x50462ce2, + 0x5046acf8, + 0x50472d0a, + 0x5047ad1f, + 0x50482d45, + 0x5048ad53, + 0x50492d66, + 0x5049ad7b, + 0x504a2d91, + 0x504aada1, + 0x504b2dc1, + 0x504badd4, + 0x504c2df7, + 0x504cae25, + 0x504d2e37, + 0x504dae54, + 0x504e2e6f, + 0x504eae8b, + 0x504f2e9d, + 0x504faeb4, + 0x50502ec3, + 0x505086ef, + 0x50512ed6, + 0x58320ec9, + 0x68320e8b, + 0x68328c25, + 0x68330c38, + 0x68338e99, + 0x68340ea9, + 0x683480ea, + 0x6c320e67, + 0x6c328bfc, + 0x6c330e72, + 0x74320a0b, + 0x78320970, + 0x78328985, + 0x78330991, 0x78338083, - 0x78340922, - 0x78348937, - 0x78350956, - 0x78358978, - 0x7836098d, - 0x783689a3, - 0x783709b3, - 0x783789c6, - 0x783809d9, - 0x783889eb, - 0x783909f8, - 0x78398a17, - 0x783a0a2c, - 0x783a8a3a, - 0x783b0a44, - 0x783b8a58, - 0x783c0a6f, - 0x783c8a84, - 0x783d0a9b, - 0x783d8ab0, - 0x783e0a06, - 0x7c3211b2, + 0x783409a0, + 0x783489b5, + 0x783509d4, + 0x783589f6, + 0x78360a0b, + 0x78368a21, + 0x78370a31, + 0x78378a44, + 0x78380a57, + 0x78388a69, + 0x78390a76, + 0x78398a95, + 0x783a0aaa, + 0x783a8ab8, + 0x783b0ac2, + 0x783b8ad6, + 0x783c0aed, + 0x783c8b02, + 0x783d0b19, + 0x783d8b2e, + 0x783e0a84, + 0x7c3210d6, }; const 
size_t kOpenSSLReasonValuesLen = sizeof(kOpenSSLReasonValues) / sizeof(kOpenSSLReasonValues[0]); @@ -819,8 +888,10 @@ Pod::Spec.new do |s| "BN_LIB\\0" "BOOLEAN_IS_WRONG_LENGTH\\0" "BUFFER_TOO_SMALL\\0" + "CONTEXT_NOT_INITIALISED\\0" "DECODE_ERROR\\0" "DEPTH_EXCEEDED\\0" + "DIGEST_AND_KEY_TYPE_NOT_SUPPORTED\\0" "ENCODE_ERROR\\0" "ERROR_GETTING_TIME\\0" "EXPECTING_AN_ASN1_SEQUENCE\\0" @@ -861,7 +932,6 @@ Pod::Spec.new do |s| "INVALID_UNIVERSALSTRING_LENGTH\\0" "INVALID_UTF8STRING\\0" "LIST_ERROR\\0" - "MALLOC_FAILURE\\0" "MISSING_ASN1_EOS\\0" "MISSING_EOC\\0" "MISSING_SECOND_NUMBER\\0" @@ -893,10 +963,13 @@ Pod::Spec.new do |s| "UNEXPECTED_EOC\\0" "UNIVERSALSTRING_IS_WRONG_LENGTH\\0" "UNKNOWN_FORMAT\\0" + "UNKNOWN_MESSAGE_DIGEST_ALGORITHM\\0" + "UNKNOWN_SIGNATURE_ALGORITHM\\0" "UNKNOWN_TAG\\0" "UNSUPPORTED_ANY_DEFINED_BY_TYPE\\0" "UNSUPPORTED_PUBLIC_KEY_TYPE\\0" "UNSUPPORTED_TYPE\\0" + "WRONG_PUBLIC_KEY_TYPE\\0" "WRONG_TAG\\0" "WRONG_TYPE\\0" "BAD_FOPEN_MODE\\0" @@ -969,6 +1042,7 @@ Pod::Spec.new do |s| "MODULUS_TOO_LARGE\\0" "NO_PRIVATE_VALUE\\0" "BAD_Q_VALUE\\0" + "BAD_VERSION\\0" "MISSING_PARAMETERS\\0" "NEED_NEW_SETUP_VALUES\\0" "BIGNUM_OUT_OF_RANGE\\0" @@ -976,8 +1050,10 @@ Pod::Spec.new do |s| "D2I_ECPKPARAMETERS_FAILURE\\0" "EC_GROUP_NEW_BY_NAME_FAILURE\\0" "GROUP2PKPARAMETERS_FAILURE\\0" + "GROUP_MISMATCH\\0" "I2D_ECPKPARAMETERS_FAILURE\\0" "INCOMPATIBLE_OBJECTS\\0" + "INVALID_COFACTOR\\0" "INVALID_COMPRESSED_POINT\\0" "INVALID_COMPRESSION_BIT\\0" "INVALID_ENCODING\\0" @@ -1002,27 +1078,19 @@ Pod::Spec.new do |s| "NOT_IMPLEMENTED\\0" "RANDOM_NUMBER_GENERATION_FAILED\\0" "OPERATION_NOT_SUPPORTED\\0" - "BN_DECODE_ERROR\\0" "COMMAND_NOT_SUPPORTED\\0" - "CONTEXT_NOT_INITIALISED\\0" "DIFFERENT_KEY_TYPES\\0" "DIFFERENT_PARAMETERS\\0" - "DIGEST_AND_KEY_TYPE_NOT_SUPPORTED\\0" "EXPECTING_AN_EC_KEY_KEY\\0" "EXPECTING_AN_RSA_KEY\\0" - "EXPECTING_A_DH_KEY\\0" "EXPECTING_A_DSA_KEY\\0" "ILLEGAL_OR_UNSUPPORTED_PADDING_MODE\\0" - "INVALID_CURVE\\0" "INVALID_DIGEST_LENGTH\\0" "INVALID_DIGEST_TYPE\\0" "INVALID_KEYBITS\\0" "INVALID_MGF1_MD\\0" "INVALID_PADDING_MODE\\0" - "INVALID_PSS_PARAMETERS\\0" "INVALID_PSS_SALTLEN\\0" - "INVALID_SALT_LENGTH\\0" - "INVALID_TRAILER\\0" "KEYS_NOT_SET\\0" "NO_DEFAULT_DIGEST\\0" "NO_KEY_SET\\0" @@ -1032,17 +1100,8 @@ Pod::Spec.new do |s| "NO_PARAMETERS_SET\\0" "OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE\\0" "OPERATON_NOT_INITIALIZED\\0" - "PARAMETER_ENCODING_ERROR\\0" - "UNKNOWN_DIGEST\\0" - "UNKNOWN_MASK_DIGEST\\0" - "UNKNOWN_MESSAGE_DIGEST_ALGORITHM\\0" "UNKNOWN_PUBLIC_KEY_TYPE\\0" - "UNKNOWN_SIGNATURE_ALGORITHM\\0" "UNSUPPORTED_ALGORITHM\\0" - "UNSUPPORTED_MASK_ALGORITHM\\0" - "UNSUPPORTED_MASK_PARAMETER\\0" - "UNSUPPORTED_SIGNATURE_TYPE\\0" - "WRONG_PUBLIC_KEY_TYPE\\0" "OUTPUT_TOO_LARGE\\0" "UNKNOWN_NID\\0" "BAD_BASE64_DECODE\\0" @@ -1078,13 +1137,13 @@ Pod::Spec.new do |s| "UNKNOWN_ALGORITHM\\0" "UNKNOWN_CIPHER\\0" "UNKNOWN_CIPHER_ALGORITHM\\0" + "UNKNOWN_DIGEST\\0" "UNKNOWN_HASH\\0" "UNSUPPORTED_PRIVATE_KEY_ALGORITHM\\0" "BAD_E_VALUE\\0" "BAD_FIXED_HEADER_DECRYPT\\0" "BAD_PAD_BYTE_COUNT\\0" "BAD_RSA_PARAMETERS\\0" - "BAD_VERSION\\0" "BLOCK_TYPE_IS_NOT_01\\0" "BN_NOT_INITIALIZED\\0" "CANNOT_RECOVER_MULTI_PRIME_KEY\\0" @@ -1129,7 +1188,6 @@ Pod::Spec.new do |s| "BAD_DIGEST_LENGTH\\0" "BAD_ECC_CERT\\0" "BAD_ECPOINT\\0" - "BAD_HANDSHAKE_LENGTH\\0" "BAD_HANDSHAKE_RECORD\\0" "BAD_HELLO_REQUEST\\0" "BAD_LENGTH\\0" @@ -1140,7 +1198,6 @@ Pod::Spec.new do |s| "BAD_SSL_FILETYPE\\0" "BAD_WRITE_RETRY\\0" "BIO_NOT_SET\\0" - "CANNOT_SERIALIZE_PUBLIC_KEY\\0" 
"CA_DN_LENGTH_MISMATCH\\0" "CA_DN_TOO_LONG\\0" "CCS_RECEIVED_EARLY\\0" @@ -1149,17 +1206,12 @@ Pod::Spec.new do |s| "CERT_LENGTH_MISMATCH\\0" "CHANNEL_ID_NOT_P256\\0" "CHANNEL_ID_SIGNATURE_INVALID\\0" - "CIPHER_CODE_WRONG_LENGTH\\0" "CIPHER_OR_HASH_UNAVAILABLE\\0" "CLIENTHELLO_PARSE_FAILED\\0" "CLIENTHELLO_TLSEXT\\0" "CONNECTION_REJECTED\\0" "CONNECTION_TYPE_NOT_SET\\0" - "COOKIE_MISMATCH\\0" - "CUSTOM_EXTENSION_CONTENTS_TOO_LARGE\\0" "CUSTOM_EXTENSION_ERROR\\0" - "D2I_ECDSA_SIG\\0" - "DATA_BETWEEN_CCS_AND_FINISHED\\0" "DATA_LENGTH_TOO_LONG\\0" "DECRYPTION_FAILED\\0" "DECRYPTION_FAILED_OR_BAD_RECORD_MAC\\0" @@ -1168,38 +1220,28 @@ Pod::Spec.new do |s| "DIGEST_CHECK_FAILED\\0" "DTLS_MESSAGE_TOO_BIG\\0" "ECC_CERT_NOT_FOR_SIGNING\\0" - "EMPTY_SRTP_PROTECTION_PROFILE_LIST\\0" "EMS_STATE_INCONSISTENT\\0" "ENCRYPTED_LENGTH_TOO_LONG\\0" "ERROR_ADDING_EXTENSION\\0" "ERROR_IN_RECEIVED_CIPHER_LIST\\0" "ERROR_PARSING_EXTENSION\\0" - "EVP_DIGESTSIGNFINAL_FAILED\\0" - "EVP_DIGESTSIGNINIT_FAILED\\0" "EXCESSIVE_MESSAGE_SIZE\\0" "EXTRA_DATA_IN_MESSAGE\\0" "FRAGMENT_MISMATCH\\0" - "GOT_A_FIN_BEFORE_A_CCS\\0" - "GOT_CHANNEL_ID_BEFORE_A_CCS\\0" - "GOT_NEXT_PROTO_BEFORE_A_CCS\\0" "GOT_NEXT_PROTO_WITHOUT_EXTENSION\\0" "HANDSHAKE_FAILURE_ON_CLIENT_HELLO\\0" - "HANDSHAKE_RECORD_BEFORE_CCS\\0" "HTTPS_PROXY_REQUEST\\0" "HTTP_REQUEST\\0" "INAPPROPRIATE_FALLBACK\\0" "INVALID_COMMAND\\0" "INVALID_MESSAGE\\0" + "INVALID_OUTER_RECORD_TYPE\\0" "INVALID_SSL_SESSION\\0" "INVALID_TICKET_KEYS_LENGTH\\0" "LENGTH_MISMATCH\\0" "LIBRARY_HAS_NO_CIPHERS\\0" - "MISSING_DH_KEY\\0" - "MISSING_ECDSA_SIGNING_CERT\\0" "MISSING_EXTENSION\\0" "MISSING_RSA_CERTIFICATE\\0" - "MISSING_RSA_ENCRYPTING_CERT\\0" - "MISSING_RSA_SIGNING_CERT\\0" "MISSING_TMP_DH_KEY\\0" "MISSING_TMP_ECDH_KEY\\0" "MIXED_SPECIAL_OPERATOR_WITH_GROUPS\\0" @@ -1211,7 +1253,6 @@ Pod::Spec.new do |s| "NO_CERTIFICATE_SET\\0" "NO_CIPHERS_AVAILABLE\\0" "NO_CIPHERS_PASSED\\0" - "NO_CIPHERS_SPECIFIED\\0" "NO_CIPHER_MATCH\\0" "NO_COMPRESSION_SPECIFIED\\0" "NO_METHOD_SPECIFIED\\0" @@ -1220,13 +1261,10 @@ Pod::Spec.new do |s| "NO_RENEGOTIATION\\0" "NO_REQUIRED_DIGEST\\0" "NO_SHARED_CIPHER\\0" - "NO_SHARED_SIGATURE_ALGORITHMS\\0" - "NO_SRTP_PROFILES\\0" "NULL_SSL_CTX\\0" "NULL_SSL_METHOD_PASSED\\0" "OLD_SESSION_CIPHER_NOT_RETURNED\\0" "OLD_SESSION_VERSION_NOT_RETURNED\\0" - "PACKET_LENGTH_TOO_LONG\\0" "PARSE_TLSEXT\\0" "PATH_TOO_LONG\\0" "PEER_DID_NOT_RETURN_A_CERTIFICATE\\0" @@ -1235,11 +1273,9 @@ Pod::Spec.new do |s| "PSK_IDENTITY_NOT_FOUND\\0" "PSK_NO_CLIENT_CB\\0" "PSK_NO_SERVER_CB\\0" - "READ_BIO_NOT_SET\\0" "READ_TIMEOUT_EXPIRED\\0" "RECORD_LENGTH_MISMATCH\\0" "RECORD_TOO_LARGE\\0" - "RENEGOTIATE_EXT_TOO_LONG\\0" "RENEGOTIATION_ENCODING_ERR\\0" "RENEGOTIATION_MISMATCH\\0" "REQUIRED_CIPHER_MISSING\\0" @@ -1249,13 +1285,11 @@ Pod::Spec.new do |s| "SERVERHELLO_TLSEXT\\0" "SESSION_ID_CONTEXT_UNINITIALIZED\\0" "SESSION_MAY_NOT_BE_CREATED\\0" - "SIGNATURE_ALGORITHMS_ERROR\\0" + "SHUTDOWN_WHILE_IN_INIT\\0" "SIGNATURE_ALGORITHMS_EXTENSION_SENT_BY_SERVER\\0" "SRTP_COULD_NOT_ALLOCATE_PROFILES\\0" - "SRTP_PROTECTION_PROFILE_LIST_TOO_LONG\\0" "SRTP_UNKNOWN_PROTECTION_PROFILE\\0" "SSL3_EXT_INVALID_SERVERNAME\\0" - "SSL3_EXT_INVALID_SERVERNAME_TYPE\\0" "SSLV3_ALERT_BAD_CERTIFICATE\\0" "SSLV3_ALERT_BAD_RECORD_MAC\\0" "SSLV3_ALERT_CERTIFICATE_EXPIRED\\0" @@ -1270,10 +1304,7 @@ Pod::Spec.new do |s| "SSLV3_ALERT_UNSUPPORTED_CERTIFICATE\\0" "SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION\\0" "SSL_HANDSHAKE_FAILURE\\0" - "SSL_SESSION_ID_CALLBACK_FAILED\\0" - "SSL_SESSION_ID_CONFLICT\\0" 
"SSL_SESSION_ID_CONTEXT_TOO_LONG\\0" - "SSL_SESSION_ID_HAS_BAD_LENGTH\\0" "TLSV1_ALERT_ACCESS_DENIED\\0" "TLSV1_ALERT_DECODE_ERROR\\0" "TLSV1_ALERT_DECRYPTION_FAILED\\0" @@ -1292,17 +1323,12 @@ Pod::Spec.new do |s| "TLSV1_CERTIFICATE_UNOBTAINABLE\\0" "TLSV1_UNRECOGNIZED_NAME\\0" "TLSV1_UNSUPPORTED_EXTENSION\\0" - "TLS_CLIENT_CERT_REQ_WITH_ANON_CIPHER\\0" - "TLS_ILLEGAL_EXPORTER_LABEL\\0" - "TLS_INVALID_ECPOINTFORMAT_LIST\\0" "TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST\\0" "TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG\\0" "TOO_MANY_EMPTY_FRAGMENTS\\0" "TOO_MANY_WARNING_ALERTS\\0" "UNABLE_TO_FIND_ECDH_PARAMETERS\\0" - "UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS\\0" "UNEXPECTED_EXTENSION\\0" - "UNEXPECTED_GROUP_CLOSE\\0" "UNEXPECTED_MESSAGE\\0" "UNEXPECTED_OPERATOR_IN_GROUP\\0" "UNEXPECTED_RECORD\\0" @@ -1314,13 +1340,10 @@ Pod::Spec.new do |s| "UNKNOWN_PROTOCOL\\0" "UNKNOWN_SSL_VERSION\\0" "UNKNOWN_STATE\\0" - "UNPROCESSED_HANDSHAKE_DATA\\0" "UNSAFE_LEGACY_RENEGOTIATION_DISABLED\\0" "UNSUPPORTED_COMPRESSION_ALGORITHM\\0" "UNSUPPORTED_ELLIPTIC_CURVE\\0" "UNSUPPORTED_PROTOCOL\\0" - "UNSUPPORTED_SSL_VERSION\\0" - "USE_SRTP_NOT_NEGOTIATED\\0" "WRONG_CERTIFICATE_TYPE\\0" "WRONG_CIPHER_RETURNED\\0" "WRONG_CURVE\\0" @@ -1341,12 +1364,14 @@ Pod::Spec.new do |s| "IDP_MISMATCH\\0" "INVALID_DIRECTORY\\0" "INVALID_FIELD_NAME\\0" + "INVALID_PSS_PARAMETERS\\0" "INVALID_TRUST\\0" "ISSUER_MISMATCH\\0" "KEY_TYPE_MISMATCH\\0" "KEY_VALUES_MISMATCH\\0" "LOADING_CERT_DIR\\0" "LOADING_DEFAULTS\\0" + "NAME_TOO_LONG\\0" "NEWER_CRL_NOT_NEWER\\0" "NOT_PKCS7_SIGNED_DATA\\0" "NO_CERTIFICATES_INCLUDED\\0" @@ -1356,8 +1381,6 @@ Pod::Spec.new do |s| "PUBLIC_KEY_DECODE_ERROR\\0" "PUBLIC_KEY_ENCODE_ERROR\\0" "SHOULD_RETRY\\0" - "UNABLE_TO_FIND_PARAMETERS_IN_CHAIN\\0" - "UNABLE_TO_GET_CERTS_PUBLIC_KEY\\0" "UNKNOWN_KEY_TYPE\\0" "UNKNOWN_PURPOSE_ID\\0" "UNKNOWN_TRUST_ID\\0" diff --git a/src/objective-c/CronetFramework.podspec b/src/objective-c/CronetFramework.podspec index 20af7647f7..3ebcacf055 100644 --- a/src/objective-c/CronetFramework.podspec +++ b/src/objective-c/CronetFramework.podspec @@ -36,7 +36,7 @@ Pod::Spec.new do |s| s.license = { :type => 'BSD' } s.vendored_framework = "Cronet.framework" s.author = "The Chromium Authors" - s.ios.deployment_target = "8.0" + s.ios.deployment_target = "7.1" s.source = { :http => 'https://storage.googleapis.com/grpc-precompiled-binaries/cronet/Cronet.framework.zip' } s.preserve_paths = "Cronet.framework" s.public_header_files = "Cronet.framework/Headers/**/*{.h}" diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m index e9678f38a9..da9473f9a2 100644 --- a/src/objective-c/GRPCClient/GRPCCall.m +++ b/src/objective-c/GRPCClient/GRPCCall.m @@ -377,6 +377,7 @@ NSString * const kGRPCTrailersKey = @"io.grpc.TrailersKey"; [strongSelf finishWithError:[NSError errorWithDomain:kGRPCErrorDomain code:GRPCErrorCodeUnavailable userInfo:@{NSLocalizedDescriptionKey: @"Connectivity lost."}]]; + [[GRPCHost hostWithAddress:strongSelf->_host] disconnect]; } }]; } diff --git a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m index a3fa5938cd..97f6b89340 100644 --- a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m +++ b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m @@ -252,7 +252,7 @@ // Each completion queue consumes one thread. There's a trade to be made between creating and // consuming too many threads and having contention of multiple calls in a single completion - // queue. 
Currently we favor latency and use one per call. + // queue. Currently we use a singleton queue. _queue = [GRPCCompletionQueue completionQueue]; _call = [[GRPCHost hostWithAddress:host] unmanagedCallWithPath:path completionQueue:_queue]; diff --git a/src/objective-c/GRPCClient/private/NSData+GRPC.m b/src/objective-c/GRPCClient/private/NSData+GRPC.m index 1238374af3..98337799e9 100644 --- a/src/objective-c/GRPCClient/private/NSData+GRPC.m +++ b/src/objective-c/GRPCClient/private/NSData+GRPC.m @@ -42,7 +42,15 @@ static void MallocAndCopyByteBufferToCharArray(grpc_byte_buffer *buffer, size_t *length, char **array) { grpc_byte_buffer_reader reader; - grpc_byte_buffer_reader_init(&reader, buffer); + if (!grpc_byte_buffer_reader_init(&reader, buffer)) { + // grpc_byte_buffer_reader_init can fail if the data sent by the server + // could not be decompressed for any reason. This is an issue with the data + // coming from the server and thus we want the RPC to fail with error code + // INTERNAL. + *array = NULL; + *length = 0; + return; + } // The slice contains uncompressed data even if compressed data was received // because the reader takes care of automatically decompressing it gpr_slice slice = grpc_byte_buffer_reader_readall(&reader); diff --git a/src/objective-c/ProtoRPC/ProtoRPC.m b/src/objective-c/ProtoRPC/ProtoRPC.m index fb0b566f19..e7232f2683 100644 --- a/src/objective-c/ProtoRPC/ProtoRPC.m +++ b/src/objective-c/ProtoRPC/ProtoRPC.m @@ -33,7 +33,7 @@ #import "ProtoRPC.h" -#import <GPBProtocolBuffers.h> +#import <Protobuf/GPBProtocolBuffers.h> #import <RxLibrary/GRXWriteable.h> #import <RxLibrary/GRXWriter+Transformations.h> diff --git a/src/objective-c/README.md b/src/objective-c/README.md index 30d9aad64c..736c324ca9 100644 --- a/src/objective-c/README.md +++ b/src/objective-c/README.md @@ -47,6 +47,10 @@ Pod::Spec.new do |s| s.name = '<Podspec file name>' s.version = '0.0.1' s.license = '...' + s.authors = { '<your name>' => '<your email>' } + s.homepage = '...' + s.summary = '...' + s.source = { :git => 'https://github.com/...' } s.ios.deployment_target = '7.1' s.osx.deployment_target = '10.9' @@ -60,7 +64,11 @@ Pod::Spec.new do |s| ms.source_files = "*.pbobjc.{h,m}" ms.header_mappings_dir = "." ms.requires_arc = false - ms.dependency "Protobuf", "~> 3.0.0-alpha-4" + ms.dependency "Protobuf", "~> 3.0.0-beta-2" + # This is needed by all pods that depend on Protobuf: + ms.pod_target_xcconfig = { + 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1', + } end # The --objcgrpc_out plugin generates a pair of .pbrpc.h/.pbrpc.m files for each .proto file with @@ -69,7 +77,7 @@ Pod::Spec.new do |s| ss.source_files = "*.pbrpc.{h,m}" ss.header_mappings_dir = "." 
ss.requires_arc = true - ss.dependency "gRPC", "~> 0.12" + ss.dependency "gRPC-ProtoRPC", "~> 0.14" ss.dependency "#{s.name}/Messages" end end diff --git a/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec b/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec index 107e6de4e2..e3b50ddea5 100644 --- a/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec +++ b/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec @@ -1,10 +1,10 @@ Pod::Spec.new do |s| - s.name = "RemoteTest" - s.version = "0.0.1" - s.license = "New BSD" + s.name = 'RemoteTest' + s.version = '0.0.1' + s.license = 'New BSD' s.authors = { 'gRPC contributors' => 'grpc-io@googlegroups.com' } - s.homepage = "http://www.grpc.io/" - s.summary = "RemoteTest example" + s.homepage = 'http://www.grpc.io/' + s.summary = 'RemoteTest example' s.source = { :git => 'https://github.com/grpc/grpc.git' } s.ios.deployment_target = '7.1' @@ -15,18 +15,22 @@ Pod::Spec.new do |s| protoc --objc_out=. --objcgrpc_out=. *.proto CMD - s.subspec "Messages" do |ms| - ms.source_files = "*.pbobjc.{h,m}" - ms.header_mappings_dir = "." + s.subspec 'Messages' do |ms| + ms.source_files = '*.pbobjc.{h,m}' + ms.header_mappings_dir = '.' ms.requires_arc = false - ms.dependency "Protobuf", "~> 3.0.0-alpha-4" + ms.dependency 'Protobuf', '~> 3.0.0-beta-3.1' + # This is needed by all pods that depend on Protobuf: + ms.pod_target_xcconfig = { + 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1', + } end - s.subspec "Services" do |ss| - ss.source_files = "*.pbrpc.{h,m}" - ss.header_mappings_dir = "." + s.subspec 'Services' do |ss| + ss.source_files = '*.pbrpc.{h,m}' + ss.header_mappings_dir = '.' ss.requires_arc = true - ss.dependency "gRPC", "~> 0.12" + ss.dependency 'gRPC-ProtoRPC', '~> 0.14' ss.dependency "#{s.name}/Messages" end end diff --git a/src/objective-c/examples/Sample/Podfile b/src/objective-c/examples/Sample/Podfile index 93859fb734..80ab2c320d 100644 --- a/src/objective-c/examples/Sample/Podfile +++ b/src/objective-c/examples/Sample/Podfile @@ -1,10 +1,43 @@ source 'https://github.com/CocoaPods/Specs.git' platform :ios, '8.0' -pod 'Protobuf', :path => "../../../../third_party/protobuf" -pod 'BoringSSL', :podspec => "../.." -pod 'gRPC', :path => "../../../.." -pod 'RemoteTest', :path => "../RemoteTestClient" +install! 'cocoapods', :deterministic_uuids => false + +# Location of gRPC's repo root relative to this file. +GRPC_LOCAL_SRC = '../../../..' target 'Sample' do + # Depend on the generated RemoteTestClient library + pod 'RemoteTest', :path => "../RemoteTestClient" + + # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following + # lines in your application. + pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf" + + pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" + + pod 'gRPC', :path => GRPC_LOCAL_SRC + pod 'gRPC-Core', :path => GRPC_LOCAL_SRC + pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC + pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC +end + +# This pre_install hook is only needed to use the local version of gRPC-Core. You don't need it in +# your application. +pre_install do |installer| + # This is the gRPC-Core podspec object, as initialized by its podspec file. 
+ grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec + + # Copied from gRPC-Core.podspec, except for the adjusted src_root: + src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}" + grpc_core_spec.pod_target_xcconfig = { + 'GRPC_SRC_ROOT' => src_root, + 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"', + 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"', + # If we don't set these two settings, `include/grpc/support/time.h` and + # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the + # build. + 'USE_HEADERMAP' => 'NO', + 'ALWAYS_SEARCH_USER_PATHS' => 'NO', + } end diff --git a/src/objective-c/examples/SwiftSample/Podfile b/src/objective-c/examples/SwiftSample/Podfile index f2df4a34a3..b675fd29ef 100644 --- a/src/objective-c/examples/SwiftSample/Podfile +++ b/src/objective-c/examples/SwiftSample/Podfile @@ -1,10 +1,43 @@ source 'https://github.com/CocoaPods/Specs.git' platform :ios, '8.0' -pod 'Protobuf', :path => "../../../../third_party/protobuf" -pod 'BoringSSL', :podspec => "../.." -pod 'gRPC', :path => "../../../.." -pod 'RemoteTest', :path => "../RemoteTestClient" +install! 'cocoapods', :deterministic_uuids => false + +# Location of gRPC's repo root relative to this file. +GRPC_LOCAL_SRC = '../../../..' target 'SwiftSample' do + # Depend on the generated RemoteTestClient library + pod 'RemoteTest', :path => "../RemoteTestClient" + + # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following + # lines in your application. + pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf" + + pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" + + pod 'gRPC', :path => GRPC_LOCAL_SRC + pod 'gRPC-Core', :path => GRPC_LOCAL_SRC + pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC + pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC +end + +# This pre_install hook is only needed to use the local version of gRPC-Core. You don't need it in +# your application. +pre_install do |installer| + # This is the gRPC-Core podspec object, as initialized by its podspec file. + grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec + + # Copied from gRPC-Core.podspec, except for the adjusted src_root: + src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}" + grpc_core_spec.pod_target_xcconfig = { + 'GRPC_SRC_ROOT' => src_root, + 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"', + 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"', + # If we don't set these two settings, `include/grpc/support/time.h` and + # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the + # build. 
+ 'USE_HEADERMAP' => 'NO', + 'ALWAYS_SEARCH_USER_PATHS' => 'NO', + } end diff --git a/src/objective-c/tests/Connectivity/Base.lproj/Main.storyboard b/src/objective-c/tests/Connectivity/Base.lproj/Main.storyboard new file mode 100644 index 0000000000..9a05b8635d --- /dev/null +++ b/src/objective-c/tests/Connectivity/Base.lproj/Main.storyboard @@ -0,0 +1,16 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="11129.15" systemVersion="15F34" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" colorMatched="YES" initialViewController="BYZ-38-t0r"> + <dependencies> + <deployment identifier="iOS"/> + <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="11103.10"/> + </dependencies> + <scenes> + <!--View Controller--> + <scene sceneID="tne-QT-ifu"> + <objects> + <viewController id="BYZ-38-t0r" customClass="ViewController" sceneMemberID="viewController"/> + <placeholder placeholderIdentifier="IBFirstResponder" id="dkx-z0-nzr" sceneMemberID="firstResponder"/> + </objects> + </scene> + </scenes> +</document> diff --git a/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.pbxproj b/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.pbxproj new file mode 100644 index 0000000000..2a9466c03f --- /dev/null +++ b/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.pbxproj @@ -0,0 +1,353 @@ +// !$*UTF8*$! +{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXBuildFile section */ + 500A4E0AC9D489EB214D1ED4 /* libPods-ConnectivityTestingApp.a in Frameworks */ = {isa = PBXBuildFile; fileRef = C2AF815D8242A2172891621D /* libPods-ConnectivityTestingApp.a */; }; + 63BFB9CC1D2478DD00E17927 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 63BFB9CB1D2478DD00E17927 /* main.m */; }; + 63BFB9D21D2478DD00E17927 /* ViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = 63BFB9D11D2478DD00E17927 /* ViewController.m */; }; + 63BFB9D51D2478DD00E17927 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 63BFB9D31D2478DD00E17927 /* Main.storyboard */; }; +/* End PBXBuildFile section */ + +/* Begin PBXFileReference section */ + 63BFB9C71D2478DD00E17927 /* ConnectivityTestingApp.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = ConnectivityTestingApp.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 63BFB9CB1D2478DD00E17927 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = SOURCE_ROOT; }; + 63BFB9D11D2478DD00E17927 /* ViewController.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ViewController.m; sourceTree = SOURCE_ROOT; }; + 63BFB9D41D2478DD00E17927 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = "<group>"; }; + 63BFB9DB1D2478DD00E17927 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = SOURCE_ROOT; }; + BA96CBC1612BD2F70E66246C /* Pods-ConnectivityTestingApp.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-ConnectivityTestingApp.release.xcconfig"; path = "Pods/Target Support Files/Pods-ConnectivityTestingApp/Pods-ConnectivityTestingApp.release.xcconfig"; 
sourceTree = "<group>"; }; + C2AF815D8242A2172891621D /* libPods-ConnectivityTestingApp.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-ConnectivityTestingApp.a"; sourceTree = BUILT_PRODUCTS_DIR; }; + FC9BD3AE427396EDB4CD13E3 /* Pods-ConnectivityTestingApp.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-ConnectivityTestingApp.debug.xcconfig"; path = "Pods/Target Support Files/Pods-ConnectivityTestingApp/Pods-ConnectivityTestingApp.debug.xcconfig"; sourceTree = "<group>"; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 63BFB9C41D2478DD00E17927 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 500A4E0AC9D489EB214D1ED4 /* libPods-ConnectivityTestingApp.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 16E6C67F2E48B42376DFFD2A /* Pods */ = { + isa = PBXGroup; + children = ( + FC9BD3AE427396EDB4CD13E3 /* Pods-ConnectivityTestingApp.debug.xcconfig */, + BA96CBC1612BD2F70E66246C /* Pods-ConnectivityTestingApp.release.xcconfig */, + ); + name = Pods; + sourceTree = "<group>"; + }; + 48F8EC18C66D3416A41F76F5 /* Frameworks */ = { + isa = PBXGroup; + children = ( + C2AF815D8242A2172891621D /* libPods-ConnectivityTestingApp.a */, + ); + name = Frameworks; + sourceTree = "<group>"; + }; + 63BFB9BE1D2478DD00E17927 = { + isa = PBXGroup; + children = ( + 63BFB9D11D2478DD00E17927 /* ViewController.m */, + 63BFB9D31D2478DD00E17927 /* Main.storyboard */, + 63BFB9DB1D2478DD00E17927 /* Info.plist */, + 63BFB9CB1D2478DD00E17927 /* main.m */, + 63BFB9C81D2478DD00E17927 /* Products */, + 16E6C67F2E48B42376DFFD2A /* Pods */, + 48F8EC18C66D3416A41F76F5 /* Frameworks */, + ); + sourceTree = "<group>"; + }; + 63BFB9C81D2478DD00E17927 /* Products */ = { + isa = PBXGroup; + children = ( + 63BFB9C71D2478DD00E17927 /* ConnectivityTestingApp.app */, + ); + name = Products; + sourceTree = "<group>"; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + 63BFB9C61D2478DD00E17927 /* ConnectivityTestingApp */ = { + isa = PBXNativeTarget; + buildConfigurationList = 63BFB9DE1D2478DD00E17927 /* Build configuration list for PBXNativeTarget "ConnectivityTestingApp" */; + buildPhases = ( + 4DCA2703A0AA5DC1BD2751B8 /* [CP] Check Pods Manifest.lock */, + 63BFB9C31D2478DD00E17927 /* Sources */, + 63BFB9C41D2478DD00E17927 /* Frameworks */, + 63BFB9C51D2478DD00E17927 /* Resources */, + 8593A2388A8F7BF5A7E98D26 /* [CP] Embed Pods Frameworks */, + 5347BF6C41E7888C1C05CD88 /* [CP] Copy Pods Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = ConnectivityTestingApp; + productName = ConnectivityTestingApp; + productReference = 63BFB9C71D2478DD00E17927 /* ConnectivityTestingApp.app */; + productType = "com.apple.product-type.application"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 63BFB9BF1D2478DD00E17927 /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 0800; + ORGANIZATIONNAME = gRPC; + TargetAttributes = { + 63BFB9C61D2478DD00E17927 = { + CreatedOnToolsVersion = 8.0; + DevelopmentTeam = EQHXZ8M8AV; + DevelopmentTeamName = "Google, Inc."; + ProvisioningStyle = Automatic; + }; + }; + }; + buildConfigurationList = 63BFB9C21D2478DD00E17927 /* Build configuration list for PBXProject "ConnectivityTestingApp" */; + compatibilityVersion = 
"Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = 63BFB9BE1D2478DD00E17927; + productRefGroup = 63BFB9C81D2478DD00E17927 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 63BFB9C61D2478DD00E17927 /* ConnectivityTestingApp */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + 63BFB9C51D2478DD00E17927 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 63BFB9D51D2478DD00E17927 /* Main.storyboard in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXShellScriptBuildPhase section */ + 4DCA2703A0AA5DC1BD2751B8 /* [CP] Check Pods Manifest.lock */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "[CP] Check Pods Manifest.lock"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n"; + showEnvVarsInLog = 0; + }; + 5347BF6C41E7888C1C05CD88 /* [CP] Copy Pods Resources */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "[CP] Copy Pods Resources"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-ConnectivityTestingApp/Pods-ConnectivityTestingApp-resources.sh\"\n"; + showEnvVarsInLog = 0; + }; + 8593A2388A8F7BF5A7E98D26 /* [CP] Embed Pods Frameworks */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "[CP] Embed Pods Frameworks"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-ConnectivityTestingApp/Pods-ConnectivityTestingApp-frameworks.sh\"\n"; + showEnvVarsInLog = 0; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 63BFB9C31D2478DD00E17927 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 63BFB9D21D2478DD00E17927 /* ViewController.m in Sources */, + 63BFB9CC1D2478DD00E17927 /* main.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXVariantGroup section */ + 63BFB9D31D2478DD00E17927 /* Main.storyboard */ = { + isa = PBXVariantGroup; + children = ( + 63BFB9D41D2478DD00E17927 /* Base */, + ); + name = Main.storyboard; + path = .; + sourceTree = SOURCE_ROOT; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + 63BFB9DC1D2478DD00E17927 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = 
YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.0; + MTL_ENABLE_DEBUG_INFO = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = iphoneos; + }; + name = Debug; + }; + 63BFB9DD1D2478DD00E17927 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.0; + MTL_ENABLE_DEBUG_INFO = NO; + SDKROOT = iphoneos; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + 63BFB9DF1D2478DD00E17927 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = FC9BD3AE427396EDB4CD13E3 /* Pods-ConnectivityTestingApp.debug.xcconfig */; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + INFOPLIST_FILE = Info.plist; + IPHONEOS_DEPLOYMENT_TARGET = 9.3; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = io.grpc.ConnectivityTestingApp; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 63BFB9E01D2478DD00E17927 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = BA96CBC1612BD2F70E66246C /* Pods-ConnectivityTestingApp.release.xcconfig */; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + INFOPLIST_FILE = Info.plist; + IPHONEOS_DEPLOYMENT_TARGET = 9.3; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = io.grpc.ConnectivityTestingApp; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 63BFB9C21D2478DD00E17927 /* Build configuration list for PBXProject "ConnectivityTestingApp" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 63BFB9DC1D2478DD00E17927 /* Debug */, + 63BFB9DD1D2478DD00E17927 /* 
Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 63BFB9DE1D2478DD00E17927 /* Build configuration list for PBXNativeTarget "ConnectivityTestingApp" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 63BFB9DF1D2478DD00E17927 /* Debug */, + 63BFB9E01D2478DD00E17927 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 63BFB9BF1D2478DD00E17927 /* Project object */; +} diff --git a/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 0000000000..b541b4b44d --- /dev/null +++ b/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Workspace + version = "1.0"> + <FileRef + location = "self:ConnectivityTestingApp.xcodeproj"> + </FileRef> +</Workspace> diff --git a/src/objective-c/tests/Connectivity/Info.plist b/src/objective-c/tests/Connectivity/Info.plist new file mode 100644 index 0000000000..8a9fb88701 --- /dev/null +++ b/src/objective-c/tests/Connectivity/Info.plist @@ -0,0 +1,40 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>CFBundleDevelopmentRegion</key> + <string>en</string> + <key>CFBundleExecutable</key> + <string>$(EXECUTABLE_NAME)</string> + <key>CFBundleIdentifier</key> + <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string> + <key>CFBundleInfoDictionaryVersion</key> + <string>6.0</string> + <key>CFBundleName</key> + <string>$(PRODUCT_NAME)</string> + <key>CFBundlePackageType</key> + <string>APPL</string> + <key>CFBundleShortVersionString</key> + <string>1.0</string> + <key>CFBundleSignature</key> + <string>????</string> + <key>CFBundleVersion</key> + <string>1</string> + <key>LSRequiresIPhoneOS</key> + <true/> + <key>UILaunchStoryboardName</key> + <string>Main</string> + <key>UIMainStoryboardFile</key> + <string>Main</string> + <key>UIRequiredDeviceCapabilities</key> + <array> + <string>armv7</string> + </array> + <key>UISupportedInterfaceOrientations</key> + <array> + <string>UIInterfaceOrientationPortrait</string> + <string>UIInterfaceOrientationLandscapeLeft</string> + <string>UIInterfaceOrientationLandscapeRight</string> + </array> +</dict> +</plist> diff --git a/src/objective-c/tests/Connectivity/Podfile b/src/objective-c/tests/Connectivity/Podfile new file mode 100644 index 0000000000..f9224d9e4e --- /dev/null +++ b/src/objective-c/tests/Connectivity/Podfile @@ -0,0 +1,10 @@ +install! 'cocoapods', :deterministic_uuids => false + +# Location of gRPC's repo root relative to this file. +GRPC_LOCAL_SRC = '../../../..' + +target 'ConnectivityTestingApp' do + pod 'gRPC', :path => GRPC_LOCAL_SRC + pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf" + pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" +end diff --git a/src/objective-c/tests/Connectivity/README.md b/src/objective-c/tests/Connectivity/README.md new file mode 100644 index 0000000000..851cb9d1da --- /dev/null +++ b/src/objective-c/tests/Connectivity/README.md @@ -0,0 +1,16 @@ +This app can be used to manually test gRPC under changing network conditions. 
+ +It makes RPCs in a loop, logging when the request is sent and the response is received. + +To test on the simulator, run `pod install`, open the workspace created by Cocoapods, and run the app. +Once running, disable WiFi (or ethernet) _in your computer_, then enable it again after a while. Don't +bother with the simulator's WiFi or cell settings, as they have no effect: Simulator apps are just Mac +apps running within the simulator UI. + +The expected result is to never see a "hanged" RPC: success or failure should happen almost immediately +after sending the request. Symptom of a hanged RPC is a log like the following being the last in your +console: + +``` +2016-06-29 16:51:29.443 ConnectivityTestingApp[73129:3567949] Sending request. +``` diff --git a/src/objective-c/tests/Connectivity/ViewController.m b/src/objective-c/tests/Connectivity/ViewController.m new file mode 100644 index 0000000000..2b199c9617 --- /dev/null +++ b/src/objective-c/tests/Connectivity/ViewController.m @@ -0,0 +1,82 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#import <UIKit/UIKit.h> + +#import <GRPCClient/GRPCCall.h> +#import <ProtoRPC/ProtoMethod.h> +#import <RxLibrary/GRXWriter+Immediate.h> +#import <RxLibrary/GRXWriter+Transformations.h> + +@interface ViewController : UIViewController +@end + +@implementation ViewController +- (void)viewDidLoad { + [super viewDidLoad]; + + NSString *host = @"grpc-test.sandbox.googleapis.com"; + + GRPCProtoMethod *method = [[GRPCProtoMethod alloc] initWithPackage:@"grpc.testing" + service:@"TestService" + method:@"StreamingOutputCall"]; + + __block void (^startCall)() = ^{ + GRXWriter *loggingRequestWriter = [[GRXWriter writerWithValue:[NSData data]] map:^id(id value) { + NSLog(@"Sending request."); + return value; + }]; + + GRPCCall *call = [[GRPCCall alloc] initWithHost:host + path:method.HTTPPath + requestsWriter:loggingRequestWriter]; + + [call startWithWriteable:[GRXWriteable writeableWithEventHandler:^(BOOL done, id value, + NSError *error) { + if (!done) { + return; + } + if (error) { + NSLog(@"Finished with error %@", error); + } else { + NSLog(@"Finished successfully."); + } + + dispatch_time_t oneSecond = dispatch_time(DISPATCH_TIME_NOW, (int64_t)(1 * NSEC_PER_SEC)); + dispatch_after(oneSecond, dispatch_get_main_queue(), startCall); + }]]; + }; + + startCall(); +} +@end diff --git a/src/objective-c/tests/Connectivity/main.m b/src/objective-c/tests/Connectivity/main.m new file mode 100644 index 0000000000..5e09196d54 --- /dev/null +++ b/src/objective-c/tests/Connectivity/main.m @@ -0,0 +1,46 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#import <UIKit/UIKit.h> + +@interface AppDelegate : UIResponder <UIApplicationDelegate> +@property (strong, nonatomic) UIWindow *window; +@end +@implementation AppDelegate +@end + +int main(int argc, char * argv[]) { + @autoreleasepool { + return UIApplicationMain(argc, argv, nil, NSStringFromClass(AppDelegate.class)); + } +} diff --git a/src/objective-c/tests/InteropTests.m b/src/objective-c/tests/InteropTests.m index a503f02059..494743d604 100644 --- a/src/objective-c/tests/InteropTests.m +++ b/src/objective-c/tests/InteropTests.m @@ -40,7 +40,6 @@ #import <GRPCClient/GRPCCall+Tests.h> #import <GRPCClient/GRPCCall+Cronet.h> #import <ProtoRPC/ProtoRPC.h> -#import <RemoteTest/Empty.pbobjc.h> #import <RemoteTest/Messages.pbobjc.h> #import <RemoteTest/Test.pbobjc.h> #import <RemoteTest/Test.pbrpc.h> @@ -110,12 +109,12 @@ static cronet_engine *cronetEngine = NULL; XCTAssertNotNil(self.class.host); __weak XCTestExpectation *expectation = [self expectationWithDescription:@"EmptyUnary"]; - RMTEmpty *request = [RMTEmpty message]; + GPBEmpty *request = [GPBEmpty message]; - [_service emptyCallWithRequest:request handler:^(RMTEmpty *response, NSError *error) { + [_service emptyCallWithRequest:request handler:^(GPBEmpty *response, NSError *error) { XCTAssertNil(error, @"Finished with unexpected error: %@", error); - id expectedResponse = [RMTEmpty message]; + id expectedResponse = [GPBEmpty message]; XCTAssertEqualObjects(response, expectedResponse); [expectation fulfill]; @@ -343,9 +342,9 @@ static cronet_engine *cronetEngine = NULL; __weak XCTestExpectation *expectation = [self expectationWithDescription:@"RPC after closing connection"]; - RMTEmpty *request = [RMTEmpty message]; + GPBEmpty *request = [GPBEmpty message]; - [_service emptyCallWithRequest:request handler:^(RMTEmpty *response, NSError *error) { + [_service emptyCallWithRequest:request handler:^(GPBEmpty *response, NSError *error) { XCTAssertNil(error, @"First RPC finished with unexpected error: %@", error); #pragma clang diagnostic push @@ -353,7 +352,7 @@ static cronet_engine *cronetEngine = NULL; [GRPCCall closeOpenConnections]; #pragma clang diagnostic pop - [_service emptyCallWithRequest:request handler:^(RMTEmpty *response, NSError *error) { + [_service emptyCallWithRequest:request handler:^(GPBEmpty *response, NSError *error) { XCTAssertNil(error, @"Second RPC finished with unexpected error: %@", error); [expectation fulfill]; }]; diff --git a/src/objective-c/tests/Podfile b/src/objective-c/tests/Podfile index 6d5f94cbda..30a34260d4 100644 --- a/src/objective-c/tests/Podfile +++ b/src/objective-c/tests/Podfile @@ -3,36 +3,59 @@ platform :ios, '8.0' install! 'cocoapods', :deterministic_uuids => false -def shared_pods - pod 'Protobuf', :path => "../../../third_party/protobuf", :inhibit_warnings => true - pod 'BoringSSL', :podspec => "..", :inhibit_warnings => true - pod 'CronetFramework', :podspec => ".." - pod 'gRPC', :path => "../../.." - pod 'RemoteTest', :path => "RemoteTestClient" -end - -target 'Tests' do - shared_pods -end - -target 'AllTests' do - shared_pods -end +# Location of gRPC's repo root relative to this file. +GRPC_LOCAL_SRC = '../../..' -target 'RxLibraryUnitTests' do - shared_pods -end - -target 'InteropTestsRemote' do - shared_pods +# Install the dependencies in the main target plus all test targets. 
+%w( + Tests + AllTests + RxLibraryUnitTests + InteropTestsRemote + InteropTestsLocalSSL + InteropTestsLocalCleartext +).each do |target_name| + target target_name do + pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf", :inhibit_warnings => true + pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c", :inhibit_warnings => true + pod 'CronetFramework', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" + pod 'gRPC', :path => GRPC_LOCAL_SRC + pod 'gRPC-Core', :path => GRPC_LOCAL_SRC + pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC + pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC + pod 'RemoteTest', :path => "RemoteTestClient" + end end -target 'InteropTestsLocalSSL' do - shared_pods -end +# gRPC-Core.podspec needs to be modified to be successfully used for local development. A Podfile's +# pre_install hook lets us do that. The block passed to it runs after the podspecs are downloaded +# and before they are installed in the user project. +# +# This podspec searches for the gRPC core library headers under "$(PODS_ROOT)/gRPC-Core", where +# Cocoapods normally places the downloaded sources. When doing local development of the libraries, +# though, Cocoapods just takes the sources from whatever directory was specified using `:path`, and +# doesn't copy them under $(PODS_ROOT). When using static libraries, one can sometimes rely on the +# symbolic links to the pods headers that Cocoapods creates under "$(PODS_ROOT)/Headers". But those +# aren't created when using dynamic frameworks. So our solution is to modify the podspec on the fly +# to point at the local directory where the sources are. +# +# TODO(jcanizales): Send a PR to Cocoapods to get rid of this need. +pre_install do |installer| + # This is the gRPC-Core podspec object, as initialized by its podspec file. + grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec -target 'InteropTestsLocalCleartext' do - shared_pods + # Copied from gRPC-Core.podspec, except for the adjusted src_root: + src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}" + grpc_core_spec.pod_target_xcconfig = { + 'GRPC_SRC_ROOT' => src_root, + 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"', + 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"', + # If we don't set these two settings, `include/grpc/support/time.h` and + # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the + # build. + 'USE_HEADERMAP' => 'NO', + 'ALWAYS_SEARCH_USER_PATHS' => 'NO', + } end post_install do |installer| @@ -40,7 +63,7 @@ post_install do |installer| target.build_configurations.each do |config| config.build_settings['GCC_TREAT_WARNINGS_AS_ERRORS'] = 'YES' end - if target.name == 'gRPC' + if target.name == 'gRPC-Core' target.build_configurations.each do |config| # TODO(zyc) Remove this setting after the issue is resolved # GPR_UNREACHABLE_CODE causes "Control may reach end of non-void diff --git a/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec b/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec index e1fd991038..25c9c7f841 100644 --- a/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec +++ b/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec @@ -15,21 +15,27 @@ Pod::Spec.new do |s| BINDIR=../../../../bins/$CONFIG PROTOC=$BINDIR/protobuf/protoc PLUGIN=$BINDIR/grpc_objective_c_plugin - $PROTOC --plugin=protoc-gen-grpc=$PLUGIN --objc_out=. --grpc_out=. 
*.proto + # we use this path to locate well-known proto files + PROTO_SRC=../../../../third_party/protobuf/src + $PROTOC --plugin=protoc-gen-grpc=$PLUGIN --objc_out=. --grpc_out=. *.proto -I $PROTO_SRC -I . CMD s.subspec "Messages" do |ms| ms.source_files = "*.pbobjc.{h,m}" ms.header_mappings_dir = "." ms.requires_arc = false - ms.dependency "Protobuf", "~> 3.0.0-alpha-4" + ms.dependency "Protobuf", "~> 3.0.0-beta-3.1" + # This is needed by all pods that depend on Protobuf: + ms.pod_target_xcconfig = { + 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1', + } end s.subspec "Services" do |ss| ss.source_files = "*.pbrpc.{h,m}" ss.header_mappings_dir = "." ss.requires_arc = true - ss.dependency "gRPC", "~> 0.12" + ss.dependency "gRPC-ProtoRPC", "~> 0.14" ss.dependency "#{s.name}/Messages" end end diff --git a/src/objective-c/tests/RemoteTestClient/test.proto b/src/objective-c/tests/RemoteTestClient/test.proto index 514c3b8095..5c359c5c12 100644 --- a/src/objective-c/tests/RemoteTestClient/test.proto +++ b/src/objective-c/tests/RemoteTestClient/test.proto @@ -31,7 +31,7 @@ // of unary/streaming requests/responses. syntax = "proto3"; -import "empty.proto"; +import "google/protobuf/empty.proto"; import "messages.proto"; package grpc.testing; @@ -42,7 +42,7 @@ option objc_class_prefix = "RMT"; // performance with various types of payload. service TestService { // One empty request followed by one empty response. - rpc EmptyCall(grpc.testing.Empty) returns (grpc.testing.Empty); + rpc EmptyCall(google.protobuf.Empty) returns (google.protobuf.Empty); // One request followed by one response. rpc UnaryCall(SimpleRequest) returns (SimpleResponse); diff --git a/src/php/composer.json b/src/php/composer.json index 01674a25db..2ad73223c6 100644 --- a/src/php/composer.json +++ b/src/php/composer.json @@ -2,7 +2,6 @@ "name": "grpc/grpc", "type": "library", "description": "gRPC library for PHP", - "version": "0.14.0", "keywords": ["rpc"], "homepage": "http://grpc.io", "license": "BSD-3-Clause", diff --git a/src/php/ext/grpc/byte_buffer.c b/src/php/ext/grpc/byte_buffer.c index 7a726de5db..3be1429f13 100644 --- a/src/php/ext/grpc/byte_buffer.c +++ b/src/php/ext/grpc/byte_buffer.c @@ -58,22 +58,20 @@ grpc_byte_buffer *string_to_byte_buffer(char *string, size_t length) { void byte_buffer_to_string(grpc_byte_buffer *buffer, char **out_string, size_t *out_length) { - if (buffer == NULL) { + grpc_byte_buffer_reader reader; + if (buffer == NULL || !grpc_byte_buffer_reader_init(&reader, buffer)) { + /* TODO(dgq): distinguish between the error cases. 
*/ *out_string = NULL; *out_length = 0; return; } - size_t length = grpc_byte_buffer_length(buffer); + + gpr_slice slice = grpc_byte_buffer_reader_readall(&reader); + size_t length = GPR_SLICE_LENGTH(slice); char *string = ecalloc(length + 1, sizeof(char)); - size_t offset = 0; - grpc_byte_buffer_reader reader; - grpc_byte_buffer_reader_init(&reader, buffer); - gpr_slice next; - while (grpc_byte_buffer_reader_next(&reader, &next) != 0) { - memcpy(string + offset, GPR_SLICE_START_PTR(next), GPR_SLICE_LENGTH(next)); - offset += GPR_SLICE_LENGTH(next); - gpr_slice_unref(next); - } + memcpy(string, GPR_SLICE_START_PTR(slice), length); + gpr_slice_unref(slice); + *out_string = string; *out_length = length; } diff --git a/src/php/ext/grpc/channel.c b/src/php/ext/grpc/channel.c index 9f0431908f..8d94c59683 100644 --- a/src/php/ext/grpc/channel.c +++ b/src/php/ext/grpc/channel.c @@ -48,7 +48,6 @@ #include <stdbool.h> #include <grpc/grpc.h> -#include <grpc/support/log.h> #include <grpc/grpc_security.h> #include "completion_queue.h" @@ -172,7 +171,6 @@ PHP_METHOD(Channel, __construct) { if (creds == NULL) { channel->wrapped = grpc_insecure_channel_create(target, &args, NULL); } else { - gpr_log(GPR_DEBUG, "Initialized secure channel"); channel->wrapped = grpc_secure_channel_create(creds->wrapped, target, &args, NULL); } diff --git a/src/php/ext/grpc/php_grpc.c b/src/php/ext/grpc/php_grpc.c index f4cb5b28cc..449ba3cd47 100644 --- a/src/php/ext/grpc/php_grpc.c +++ b/src/php/ext/grpc/php_grpc.c @@ -248,6 +248,8 @@ PHP_MSHUTDOWN_FUNCTION(grpc) { /* uncomment this line if you have INI entries UNREGISTER_INI_ENTRIES(); */ + // WARNING: This function IS being called by PHP when the extension + // is unloaded but the logs were somehow suppressed. grpc_shutdown_timeval(TSRMLS_C); grpc_php_shutdown_completion_queue(TSRMLS_C); grpc_shutdown(); diff --git a/src/php/ext/grpc/server.c b/src/php/ext/grpc/server.c index 6df2e4f978..c13e7cd1f9 100644 --- a/src/php/ext/grpc/server.c +++ b/src/php/ext/grpc/server.c @@ -48,7 +48,6 @@ #include <stdbool.h> #include <grpc/grpc.h> -#include <grpc/support/log.h> #include <grpc/grpc_security.h> #include "completion_queue.h" diff --git a/src/python/grpcio/_unixccompiler_patch.py b/src/python/grpcio/_unixccompiler_patch.py new file mode 100644 index 0000000000..9a697989b3 --- /dev/null +++ b/src/python/grpcio/_unixccompiler_patch.py @@ -0,0 +1,121 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Covers inadequacies in distutils.""" + +from distutils import ccompiler +from distutils import errors +from distutils import unixccompiler +import os +import os.path +import shutil +import sys +import tempfile + + +def _unix_piecemeal_link( + self, target_desc, objects, output_filename, output_dir=None, + libraries=None, library_dirs=None, runtime_library_dirs=None, + export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, + build_temp=None, target_lang=None): + """`link` externalized method taken almost verbatim from UnixCCompiler. + + Modifies the link command for unix-like compilers by using a command file so + that long command line argument strings don't break the command shell's + ARG_MAX character limit. + """ + objects, output_dir = self._fix_object_args(objects, output_dir) + libraries, library_dirs, runtime_library_dirs = self._fix_lib_args( + libraries, library_dirs, runtime_library_dirs) + # filter out standard library paths, which are not explicitely needed + # for linking + library_dirs = [dir for dir in library_dirs + if not dir in ('/lib', '/lib64', '/usr/lib', '/usr/lib64')] + runtime_library_dirs = [dir for dir in runtime_library_dirs + if not dir in ('/lib', '/lib64', '/usr/lib', '/usr/lib64')] + lib_opts = ccompiler.gen_lib_options(self, library_dirs, runtime_library_dirs, + libraries) + if (not (isinstance(output_dir, str) or isinstance(output_dir, bytes)) + and output_dir is not None): + raise TypeError("'output_dir' must be a string or None") + if output_dir is not None: + output_filename = os.path.join(output_dir, output_filename) + + if self._need_link(objects, output_filename): + ld_args = (objects + self.objects + + lib_opts + ['-o', output_filename]) + if debug: + ld_args[:0] = ['-g'] + if extra_preargs: + ld_args[:0] = extra_preargs + if extra_postargs: + ld_args.extend(extra_postargs) + self.mkpath(os.path.dirname(output_filename)) + try: + if target_desc == ccompiler.CCompiler.EXECUTABLE: + linker = self.linker_exe[:] + else: + linker = self.linker_so[:] + if target_lang == "c++" and self.compiler_cxx: + # skip over environment variable settings if /usr/bin/env + # is used to set up the linker's environment. + # This is needed on OSX. Note: this assumes that the + # normal and C++ compiler have the same environment + # settings. 
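The docstring above boils down to one trick: when the linker argument list risks exceeding the shell's ARG_MAX, the arguments are written to a response file and the driver is invoked with `@<file>`. A minimal standalone sketch of that idea, assuming a driver (such as gcc or clang) that accepts `@`-file response files; the driver name and object files are placeholders, not part of the patch:

```
# Standalone sketch of the response-file workaround described above: rather
# than passing a very long argument list directly (which can exceed ARG_MAX),
# the arguments are written to a file and the driver is invoked with
# '@<file>'. Assumes a driver that understands @-files (e.g. gcc or clang);
# the driver name and object files below are placeholders.
import os
import subprocess
import tempfile


def link_with_response_file(driver, link_args):
    command_filename = os.path.join(tempfile.mkdtemp(), 'command')
    with open(command_filename, 'w') as command_file:
        # Escape backslashes so the driver reads the arguments back unchanged.
        escaped = [arg.replace('\\', '\\\\') for arg in link_args]
        command_file.write(' '.join(escaped))
    subprocess.check_call([driver, '@{}'.format(command_filename)])


if __name__ == '__main__':
    # Hypothetical invocation; the patch above applies the same idea to
    # distutils' UnixCCompiler.link via monkeypatch_unix_compiler(), defined
    # just below.
    link_with_response_file('cc', ['a.o', 'b.o', '-shared', '-o', 'libdemo.so'])
```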
+ i = 0 + if os.path.basename(linker[0]) == "env": + i = 1 + while '=' in linker[i]: + i = i + 1 + + linker[i] = self.compiler_cxx[i] + + if sys.platform == 'darwin': + import _osx_support + linker = _osx_support.compiler_fixup(linker, ld_args) + + temporary_directory = tempfile.mkdtemp() + command_filename = os.path.abspath( + os.path.join(temporary_directory, 'command')) + with open(command_filename, 'w') as command_file: + escaped_ld_args = [arg.replace('\\', '\\\\') for arg in ld_args] + command_file.write(' '.join(escaped_ld_args)) + self.spawn(linker + ['@{}'.format(command_filename)]) + except errors.DistutilsExecError: + raise ccompiler.LinkError + else: + log.debug("skipping %s (up-to-date)", output_filename) + +# TODO(atash) try replacing this monkeypatch of the compiler harness' link +# operation with a monkeypatch of the distutils `spawn` that applies +# command-argument-file hacks where it can. Might be cleaner. +def monkeypatch_unix_compiler(): + """Monkeypatching is dumb, but it's either that or we become maintainers of + something much, much bigger.""" + unixccompiler.UnixCCompiler.link = _unix_piecemeal_link diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py index b3eeaad1f7..de18421cf3 100644 --- a/src/python/grpcio/grpc/__init__.py +++ b/src/python/grpcio/grpc/__init__.py @@ -1091,37 +1091,41 @@ def access_token_call_credentials(access_token): _auth.AccessTokenCallCredentials(access_token)) -def composite_call_credentials(call_credentials, additional_call_credentials): - """Compose two CallCredentials to make a new one. +def composite_call_credentials(*call_credentials): + """Compose multiple CallCredentials to make a new CallCredentials. Args: - call_credentials: A CallCredentials object. - additional_call_credentials: Another CallCredentials object to compose on - top of call_credentials. + *call_credentials: At least two CallCredentials objects. Returns: - A new CallCredentials composed of the two given CallCredentials. + A CallCredentials object composed of the given CallCredentials objects. """ + from grpc import _credential_composition + cygrpc_call_credentials = tuple( + single_call_credentials._credentials + for single_call_credentials in call_credentials) return CallCredentials( - _cygrpc.call_credentials_composite( - call_credentials._credentials, - additional_call_credentials._credentials)) + _credential_composition.call(cygrpc_call_credentials)) -def composite_channel_credentials(channel_credentials, call_credentials): - """Compose a ChannelCredentials and a CallCredentials. +def composite_channel_credentials(channel_credentials, *call_credentials): + """Compose a ChannelCredentials and one or more CallCredentials objects. Args: channel_credentials: A ChannelCredentials. - call_credentials: A CallCredentials. + *call_credentials: One or more CallCredentials objects. Returns: A ChannelCredentials composed of the given ChannelCredentials and - CallCredentials. + CallCredentials objects. 
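The call-credentials composition above now accepts any number of CallCredentials objects. A small usage sketch, assuming the `grpc` package built from this tree; the token strings are placeholders:

```
# Usage sketch for the variadic composite_call_credentials shown above.
import grpc

first = grpc.access_token_call_credentials('token-one')
second = grpc.access_token_call_credentials('token-two')

# Any number of CallCredentials can now be composed in a single call instead
# of chaining pairwise compositions.
composed = grpc.composite_call_credentials(first, second)
```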
""" + from grpc import _credential_composition + cygrpc_call_credentials = tuple( + single_call_credentials._credentials + for single_call_credentials in call_credentials) return ChannelCredentials( - _cygrpc.channel_credentials_composite( - channel_credentials._credentials, call_credentials._credentials)) + _credential_composition.channel( + channel_credentials._credentials, cygrpc_call_credentials)) def ssl_server_credentials( @@ -1207,25 +1211,23 @@ def secure_channel(target, credentials, options=None): return _channel.Channel(target, options, credentials._credentials) -def server(generic_rpc_handlers, thread_pool, options=None): +def server(thread_pool, handlers=None): """Creates a Server with which RPCs can be serviced. - The GenericRpcHandlers passed to this function needn't be the only - GenericRpcHandlers that will be used to serve RPCs; others may be added later - by calling add_generic_rpc_handlers any time before the returned server is - started. - Args: - generic_rpc_handlers: Some number of GenericRpcHandlers that will be used - to service RPCs after the returned Server is started. thread_pool: A futures.ThreadPoolExecutor to be used by the returned Server to service RPCs. + handlers: An optional sequence of GenericRpcHandlers to be used to service + RPCs after the returned Server is started. These handlers need not be the + only handlers the server will use to service RPCs; other handlers may + later be added by calling add_generic_rpc_handlers any time before the + returned Server is started. Returns: A Server with which RPCs can be serviced. """ from grpc import _server - return _server.Server(generic_rpc_handlers, thread_pool) + return _server.Server(thread_pool, () if handlers is None else handlers) ################################### __all__ ################################# diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py index a89b501303..29dbc3a668 100644 --- a/src/python/grpcio/grpc/_channel.py +++ b/src/python/grpcio/grpc/_channel.py @@ -195,7 +195,8 @@ def _consume_request_iterator( cygrpc.operation_send_message( serialized_request, _EMPTY_FLAGS), ) - call.start_batch(cygrpc.Operations(operations), event_handler) + call.start_client_batch(cygrpc.Operations(operations), + event_handler) state.due.add(cygrpc.OperationType.send_message) while True: state.condition.wait() @@ -211,7 +212,7 @@ def _consume_request_iterator( operations = ( cygrpc.operation_send_close_from_client(_EMPTY_FLAGS), ) - call.start_batch(cygrpc.Operations(operations), event_handler) + call.start_client_batch(cygrpc.Operations(operations), event_handler) state.due.add(cygrpc.OperationType.send_close_from_client) def stop_consumption_thread(timeout): @@ -312,7 +313,7 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): if self._state.code is None: event_handler = _event_handler( self._state, self._call, self._response_deserializer) - self._call.start_batch( + self._call.start_client_batch( cygrpc.Operations( (cygrpc.operation_receive_message(_EMPTY_FLAGS),)), event_handler) @@ -471,7 +472,7 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): None, 0, completion_queue, self._method, None, deadline_timespec) if credentials is not None: call.set_credentials(credentials._credentials) - call.start_batch(cygrpc.Operations(operations), None) + call.start_client_batch(cygrpc.Operations(operations), None) _handle_event(completion_queue.poll(), state, self._response_deserializer) return state, deadline @@ -495,7 +496,7 @@ class 
_UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): call.set_credentials(credentials._credentials) event_handler = _event_handler(state, call, self._response_deserializer) with state.condition: - call.start_batch(cygrpc.Operations(operations), event_handler) + call.start_client_batch(cygrpc.Operations(operations), event_handler) return _Rendezvous(state, call, self._response_deserializer, deadline) @@ -523,7 +524,7 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable): call.set_credentials(credentials._credentials) event_handler = _event_handler(state, call, self._response_deserializer) with state.condition: - call.start_batch( + call.start_client_batch( cygrpc.Operations( (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)), event_handler) @@ -534,7 +535,7 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable): cygrpc.operation_send_close_from_client(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_batch(cygrpc.Operations(operations), event_handler) + call.start_client_batch(cygrpc.Operations(operations), event_handler) return _Rendezvous(state, call, self._response_deserializer, deadline) @@ -558,7 +559,7 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): if credentials is not None: call.set_credentials(credentials._credentials) with state.condition: - call.start_batch( + call.start_client_batch( cygrpc.Operations( (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)), None) @@ -568,7 +569,7 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): cygrpc.operation_receive_message(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_batch(cygrpc.Operations(operations), None) + call.start_client_batch(cygrpc.Operations(operations), None) _consume_request_iterator( request_iterator, state, call, self._request_serializer) while True: @@ -602,7 +603,7 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): call.set_credentials(credentials._credentials) event_handler = _event_handler(state, call, self._response_deserializer) with state.condition: - call.start_batch( + call.start_client_batch( cygrpc.Operations( (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)), event_handler) @@ -612,7 +613,7 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): cygrpc.operation_receive_message(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_batch(cygrpc.Operations(operations), event_handler) + call.start_client_batch(cygrpc.Operations(operations), event_handler) _consume_request_iterator( request_iterator, state, call, self._request_serializer) return _Rendezvous(state, call, self._response_deserializer, deadline) @@ -639,7 +640,7 @@ class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable): call.set_credentials(credentials._credentials) event_handler = _event_handler(state, call, self._response_deserializer) with state.condition: - call.start_batch( + call.start_client_batch( cygrpc.Operations( (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)), event_handler) @@ -648,7 +649,7 @@ class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable): _common.cygrpc_metadata(metadata), _EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_batch(cygrpc.Operations(operations), event_handler) + call.start_client_batch(cygrpc.Operations(operations), event_handler) _consume_request_iterator( request_iterator, state, call, self._request_serializer) return 
_Rendezvous(state, call, self._response_deserializer, deadline) diff --git a/src/python/grpcio/grpc/_credential_composition.py b/src/python/grpcio/grpc/_credential_composition.py new file mode 100644 index 0000000000..9cb5508e27 --- /dev/null +++ b/src/python/grpcio/grpc/_credential_composition.py @@ -0,0 +1,48 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from grpc._cython import cygrpc + + +def _call(call_credentialses): + call_credentials_iterator = iter(call_credentialses) + composition = next(call_credentials_iterator) + for additional_call_credentials in call_credentials_iterator: + composition = cygrpc.call_credentials_composite( + composition, additional_call_credentials) + return composition + + +def call(call_credentialses): + return _call(call_credentialses) + + +def channel(channel_credentials, call_credentialses): + return cygrpc.channel_credentials_composite( + channel_credentials, _call(call_credentialses)) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi index a09bbc75fe..ba60986143 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi @@ -37,13 +37,16 @@ cdef class Call: self.c_call = NULL self.references = [] - def start_batch(self, operations, tag): + def _start_batch(self, operations, tag, retain_self): if not self.is_valid: raise ValueError("invalid call object cannot be used from Python") cdef grpc_call_error result cdef Operations cy_operations = Operations(operations) cdef OperationTag operation_tag = OperationTag(tag) - operation_tag.operation_call = self + if retain_self: + operation_tag.operation_call = self + else: + operation_tag.operation_call = None operation_tag.batch_operations = cy_operations cpython.Py_INCREF(operation_tag) with nogil: @@ -52,6 +55,14 @@ cdef class Call: <cpython.PyObject *>operation_tag, NULL) return result + def start_client_batch(self, operations, tag): + # We don't reference this call in the operations tag because + # it should be cancelled when it goes out of scope + return self._start_batch(operations, tag, False) + + def start_server_batch(self, operations, tag): + return self._start_batch(operations, tag, True) + def cancel( self, grpc_status_code error_code=GRPC_STATUS__DO_NOT_USE, details=None): @@ -94,8 +105,7 @@ cdef class Call: def __dealloc__(self): if self.c_call != NULL: - with nogil: - grpc_call_destroy(self.c_call) + grpc_call_destroy(self.c_call) # The object *should* always be valid from Python. Used for debugging. 
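The new _credential_composition helper above composes the credentials pairwise, left to right. For reference, a rough standalone sketch of the same reduction expressed with functools.reduce (assuming the sequence holds cygrpc call-credentials objects, exactly as _call() does):

    import functools
    from grpc._cython import cygrpc

    def compose_call_credentials(call_credentialses):
        # Equivalent to _call(): fold the sequence pairwise with the
        # cygrpc composite constructor; requires at least one element.
        return functools.reduce(
            cygrpc.call_credentials_composite, call_credentialses)
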
@property diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi index 1406696510..5416401431 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi @@ -102,5 +102,4 @@ cdef class Channel: def __dealloc__(self): if self.c_channel != NULL: - with nogil: - grpc_channel_destroy(self.c_channel) + grpc_channel_destroy(self.c_channel) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi index 90266516fe..5955021ceb 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi @@ -118,18 +118,14 @@ cdef class CompletionQueue: def __dealloc__(self): cdef gpr_timespec c_deadline - with nogil: - c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME) + c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME) if self.c_completion_queue != NULL: # Ensure shutdown if not self.is_shutting_down: - with nogil: - grpc_completion_queue_shutdown(self.c_completion_queue) - # Pump the queue + grpc_completion_queue_shutdown(self.c_completion_queue) + # Pump the queue (All outstanding calls should have been cancelled) while not self.is_shutdown: - with nogil: - event = grpc_completion_queue_next( - self.c_completion_queue, c_deadline, NULL) + event = grpc_completion_queue_next( + self.c_completion_queue, c_deadline, NULL) self._interpret_event(event) - with nogil: - grpc_completion_queue_destroy(self.c_completion_queue) + grpc_completion_queue_destroy(self.c_completion_queue) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi index b24e69243e..035ac49a8b 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi @@ -46,8 +46,7 @@ cdef class ChannelCredentials: def __dealloc__(self): if self.c_credentials != NULL: - with nogil: - grpc_channel_credentials_release(self.c_credentials) + grpc_channel_credentials_release(self.c_credentials) cdef class CallCredentials: @@ -64,8 +63,7 @@ cdef class CallCredentials: def __dealloc__(self): if self.c_credentials != NULL: - with nogil: - grpc_call_credentials_release(self.c_credentials) + grpc_call_credentials_release(self.c_credentials) cdef class ServerCredentials: @@ -76,8 +74,7 @@ cdef class ServerCredentials: def __dealloc__(self): if self.c_credentials != NULL: - with nogil: - grpc_server_credentials_release(self.c_credentials) + grpc_server_credentials_release(self.c_credentials) cdef class CredentialsMetadataPlugin: diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi index f3b3d61273..7714504d1b 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi @@ -132,8 +132,8 @@ cdef extern from "grpc/_cython/loader.h": size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) nogil void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer) nogil - void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, - grpc_byte_buffer *buffer) nogil + int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, + grpc_byte_buffer *buffer) nogil int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader, gpr_slice *slice) nogil void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) 
nogil diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi index 0474697af8..96c5b02bc2 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi @@ -58,14 +58,14 @@ cdef class Event: cdef readonly bint success cdef readonly object tag - # For operations with calls - cdef readonly Call operation_call - # For Server.request_call cdef readonly bint is_new_request cdef readonly CallDetails request_call_details cdef readonly Metadata request_metadata + # For server calls + cdef readonly Call operation_call + # For Call.start_batch cdef readonly Operations batch_operations diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi index 8e651e880f..54b3d00dfc 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi @@ -252,9 +252,13 @@ cdef class ByteBuffer: cdef gpr_slice data_slice cdef size_t data_slice_length cdef void *data_slice_pointer + cdef bint reader_status if self.c_byte_buffer != NULL: with nogil: - grpc_byte_buffer_reader_init(&reader, self.c_byte_buffer) + reader_status = grpc_byte_buffer_reader_init( + &reader, self.c_byte_buffer) + if not reader_status: + return None result = bytearray() with nogil: while grpc_byte_buffer_reader_next(&reader, &data_slice): @@ -283,8 +287,7 @@ cdef class ByteBuffer: def __dealloc__(self): if self.c_byte_buffer != NULL: - with nogil: - grpc_byte_buffer_destroy(self.c_byte_buffer) + grpc_byte_buffer_destroy(self.c_byte_buffer) cdef class SslPemKeyCertPair: @@ -416,8 +419,7 @@ cdef class Metadata: # this frees the allocated memory for the grpc_metadata_array (although # it'd be nice if that were documented somewhere...) # TODO(atash): document this in the C core - with nogil: - grpc_metadata_array_destroy(&self.c_metadata_array) + grpc_metadata_array_destroy(&self.c_metadata_array) def __len__(self): return self.c_metadata_array.count @@ -526,8 +528,7 @@ cdef class Operation: # Python. The remaining one(s) are primitive fields filled in by GRPC core. # This means that we need to clean up after receive_status_on_client. 
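One behavioral consequence of the reader_status check introduced above: the ByteBuffer accessor can now return None when the C-core reader fails to initialize. A purely illustrative caller-side guard (byte_buffer here stands for an assumed cygrpc.ByteBuffer instance; the method name follows this file's accessor):

    data = byte_buffer.bytes()
    if data is None:
        # Reader initialization failed; the buffer contents are unreadable.
        raise ValueError('byte buffer could not be read')
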
if self.c_op.type == GRPC_OP_RECV_STATUS_ON_CLIENT: - with nogil: - gpr_free(self._received_status_details) + gpr_free(self._received_status_details) def operation_send_initial_metadata(Metadata metadata, int flags): cdef Operation op = Operation() diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi index 3e03b6efe1..4f2d51b03f 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi @@ -171,5 +171,4 @@ cdef class Server: # much but repeatedly release the GIL and wait while not self.is_shutdown: time.sleep(0) - with nogil: - grpc_server_destroy(self.c_server) + grpc_server_destroy(self.c_server) diff --git a/src/python/grpcio/grpc/_cython/cygrpc.pyx b/src/python/grpcio/grpc/_cython/cygrpc.pyx index 7a8d0dd8a1..e055d321bc 100644 --- a/src/python/grpcio/grpc/_cython/cygrpc.pyx +++ b/src/python/grpcio/grpc/_cython/cygrpc.pyx @@ -50,13 +50,6 @@ include "grpc/_cython/_cygrpc/server.pyx.pxi" def _initialize(): - if 'win32' in sys.platform: - filename = pkg_resources.resource_filename( - 'grpc._cython', '_windows/grpc_c.64.python') - if not isinstance(filename, bytes): - filename = filename.encode() - if not pygrpc_load_core(filename): - raise ImportError('failed to load core gRPC library') if not pygrpc_initialize_core(): raise ImportError('failed to initialize core gRPC library') diff --git a/src/python/grpcio/grpc/_cython/imports.generated.c b/src/python/grpcio/grpc/_cython/imports.generated.c index d78ec2f66e..c0080b5a47 100644 --- a/src/python/grpcio/grpc/_cython/imports.generated.c +++ b/src/python/grpcio/grpc/_cython/imports.generated.c @@ -31,562 +31,7 @@ * */ +/* TODO(atash) remove cruft */ #include <grpc/support/port_platform.h> #include "imports.generated.h" - -#ifdef GPR_WINDOWS - -census_initialize_type census_initialize_import; -census_shutdown_type census_shutdown_import; -census_supported_type census_supported_import; -census_enabled_type census_enabled_import; -census_context_create_type census_context_create_import; -census_context_destroy_type census_context_destroy_import; -census_context_get_status_type census_context_get_status_import; -census_context_initialize_iterator_type census_context_initialize_iterator_import; -census_context_next_tag_type census_context_next_tag_import; -census_context_get_tag_type census_context_get_tag_import; -census_context_encode_type census_context_encode_import; -census_context_decode_type census_context_decode_import; -census_trace_mask_type census_trace_mask_import; -census_set_trace_mask_type census_set_trace_mask_import; -census_start_rpc_op_timestamp_type census_start_rpc_op_timestamp_import; -census_start_client_rpc_op_type census_start_client_rpc_op_import; -census_set_rpc_client_peer_type census_set_rpc_client_peer_import; -census_start_server_rpc_op_type census_start_server_rpc_op_import; -census_start_op_type census_start_op_import; -census_end_op_type census_end_op_import; -census_trace_print_type census_trace_print_import; -census_trace_scan_start_type census_trace_scan_start_import; -census_get_trace_record_type census_get_trace_record_import; -census_trace_scan_end_type census_trace_scan_end_import; -census_record_values_type census_record_values_import; -census_view_create_type census_view_create_import; -census_view_delete_type census_view_delete_import; -census_view_metric_type census_view_metric_import; -census_view_naggregations_type census_view_naggregations_import; 
-census_view_tags_type census_view_tags_import; -census_view_aggregrations_type census_view_aggregrations_import; -census_view_get_data_type census_view_get_data_import; -census_view_reset_type census_view_reset_import; -grpc_compression_algorithm_parse_type grpc_compression_algorithm_parse_import; -grpc_compression_algorithm_name_type grpc_compression_algorithm_name_import; -grpc_compression_algorithm_for_level_type grpc_compression_algorithm_for_level_import; -grpc_compression_options_init_type grpc_compression_options_init_import; -grpc_compression_options_enable_algorithm_type grpc_compression_options_enable_algorithm_import; -grpc_compression_options_disable_algorithm_type grpc_compression_options_disable_algorithm_import; -grpc_compression_options_is_algorithm_enabled_type grpc_compression_options_is_algorithm_enabled_import; -grpc_metadata_array_init_type grpc_metadata_array_init_import; -grpc_metadata_array_destroy_type grpc_metadata_array_destroy_import; -grpc_call_details_init_type grpc_call_details_init_import; -grpc_call_details_destroy_type grpc_call_details_destroy_import; -grpc_register_plugin_type grpc_register_plugin_import; -grpc_init_type grpc_init_import; -grpc_shutdown_type grpc_shutdown_import; -grpc_version_string_type grpc_version_string_import; -grpc_completion_queue_create_type grpc_completion_queue_create_import; -grpc_completion_queue_next_type grpc_completion_queue_next_import; -grpc_completion_queue_pluck_type grpc_completion_queue_pluck_import; -grpc_completion_queue_shutdown_type grpc_completion_queue_shutdown_import; -grpc_completion_queue_destroy_type grpc_completion_queue_destroy_import; -grpc_alarm_create_type grpc_alarm_create_import; -grpc_alarm_cancel_type grpc_alarm_cancel_import; -grpc_alarm_destroy_type grpc_alarm_destroy_import; -grpc_channel_check_connectivity_state_type grpc_channel_check_connectivity_state_import; -grpc_channel_watch_connectivity_state_type grpc_channel_watch_connectivity_state_import; -grpc_channel_create_call_type grpc_channel_create_call_import; -grpc_channel_ping_type grpc_channel_ping_import; -grpc_channel_register_call_type grpc_channel_register_call_import; -grpc_channel_create_registered_call_type grpc_channel_create_registered_call_import; -grpc_call_start_batch_type grpc_call_start_batch_import; -grpc_call_get_peer_type grpc_call_get_peer_import; -grpc_census_call_set_context_type grpc_census_call_set_context_import; -grpc_census_call_get_context_type grpc_census_call_get_context_import; -grpc_channel_get_target_type grpc_channel_get_target_import; -grpc_insecure_channel_create_type grpc_insecure_channel_create_import; -grpc_lame_client_channel_create_type grpc_lame_client_channel_create_import; -grpc_channel_destroy_type grpc_channel_destroy_import; -grpc_call_cancel_type grpc_call_cancel_import; -grpc_call_cancel_with_status_type grpc_call_cancel_with_status_import; -grpc_call_destroy_type grpc_call_destroy_import; -grpc_server_request_call_type grpc_server_request_call_import; -grpc_server_register_method_type grpc_server_register_method_import; -grpc_server_request_registered_call_type grpc_server_request_registered_call_import; -grpc_server_create_type grpc_server_create_import; -grpc_server_register_completion_queue_type grpc_server_register_completion_queue_import; -grpc_server_register_non_listening_completion_queue_type grpc_server_register_non_listening_completion_queue_import; -grpc_server_add_insecure_http2_port_type grpc_server_add_insecure_http2_port_import; -grpc_server_start_type 
grpc_server_start_import; -grpc_server_shutdown_and_notify_type grpc_server_shutdown_and_notify_import; -grpc_server_cancel_all_calls_type grpc_server_cancel_all_calls_import; -grpc_server_destroy_type grpc_server_destroy_import; -grpc_tracer_set_enabled_type grpc_tracer_set_enabled_import; -grpc_header_key_is_legal_type grpc_header_key_is_legal_import; -grpc_header_nonbin_value_is_legal_type grpc_header_nonbin_value_is_legal_import; -grpc_is_binary_header_type grpc_is_binary_header_import; -grpc_call_error_to_string_type grpc_call_error_to_string_import; -grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import; -grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import; -grpc_use_signal_type grpc_use_signal_import; -grpc_auth_property_iterator_next_type grpc_auth_property_iterator_next_import; -grpc_auth_context_property_iterator_type grpc_auth_context_property_iterator_import; -grpc_auth_context_peer_identity_type grpc_auth_context_peer_identity_import; -grpc_auth_context_find_properties_by_name_type grpc_auth_context_find_properties_by_name_import; -grpc_auth_context_peer_identity_property_name_type grpc_auth_context_peer_identity_property_name_import; -grpc_auth_context_peer_is_authenticated_type grpc_auth_context_peer_is_authenticated_import; -grpc_call_auth_context_type grpc_call_auth_context_import; -grpc_auth_context_release_type grpc_auth_context_release_import; -grpc_auth_context_add_property_type grpc_auth_context_add_property_import; -grpc_auth_context_add_cstring_property_type grpc_auth_context_add_cstring_property_import; -grpc_auth_context_set_peer_identity_property_name_type grpc_auth_context_set_peer_identity_property_name_import; -grpc_channel_credentials_release_type grpc_channel_credentials_release_import; -grpc_google_default_credentials_create_type grpc_google_default_credentials_create_import; -grpc_set_ssl_roots_override_callback_type grpc_set_ssl_roots_override_callback_import; -grpc_ssl_credentials_create_type grpc_ssl_credentials_create_import; -grpc_call_credentials_release_type grpc_call_credentials_release_import; -grpc_composite_channel_credentials_create_type grpc_composite_channel_credentials_create_import; -grpc_composite_call_credentials_create_type grpc_composite_call_credentials_create_import; -grpc_google_compute_engine_credentials_create_type grpc_google_compute_engine_credentials_create_import; -grpc_max_auth_token_lifetime_type grpc_max_auth_token_lifetime_import; -grpc_service_account_jwt_access_credentials_create_type grpc_service_account_jwt_access_credentials_create_import; -grpc_google_refresh_token_credentials_create_type grpc_google_refresh_token_credentials_create_import; -grpc_access_token_credentials_create_type grpc_access_token_credentials_create_import; -grpc_google_iam_credentials_create_type grpc_google_iam_credentials_create_import; -grpc_metadata_credentials_create_from_plugin_type grpc_metadata_credentials_create_from_plugin_import; -grpc_secure_channel_create_type grpc_secure_channel_create_import; -grpc_server_credentials_release_type grpc_server_credentials_release_import; -grpc_ssl_server_credentials_create_type grpc_ssl_server_credentials_create_import; -grpc_ssl_server_credentials_create_ex_type grpc_ssl_server_credentials_create_ex_import; -grpc_server_add_secure_http2_port_type grpc_server_add_secure_http2_port_import; -grpc_call_set_credentials_type grpc_call_set_credentials_import; -grpc_server_credentials_set_auth_metadata_processor_type 
grpc_server_credentials_set_auth_metadata_processor_import; -gpr_malloc_type gpr_malloc_import; -gpr_free_type gpr_free_import; -gpr_realloc_type gpr_realloc_import; -gpr_malloc_aligned_type gpr_malloc_aligned_import; -gpr_free_aligned_type gpr_free_aligned_import; -gpr_set_allocation_functions_type gpr_set_allocation_functions_import; -gpr_get_allocation_functions_type gpr_get_allocation_functions_import; -grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import; -grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import; -grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import; -grpc_byte_buffer_length_type grpc_byte_buffer_length_import; -grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import; -grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import; -grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import; -grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import; -grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import; -grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import; -gpr_log_type gpr_log_import; -gpr_log_message_type gpr_log_message_import; -gpr_set_log_verbosity_type gpr_set_log_verbosity_import; -gpr_log_verbosity_init_type gpr_log_verbosity_init_import; -gpr_set_log_function_type gpr_set_log_function_import; -gpr_slice_ref_type gpr_slice_ref_import; -gpr_slice_unref_type gpr_slice_unref_import; -gpr_slice_new_type gpr_slice_new_import; -gpr_slice_new_with_len_type gpr_slice_new_with_len_import; -gpr_slice_malloc_type gpr_slice_malloc_import; -gpr_slice_from_copied_string_type gpr_slice_from_copied_string_import; -gpr_slice_from_copied_buffer_type gpr_slice_from_copied_buffer_import; -gpr_slice_from_static_string_type gpr_slice_from_static_string_import; -gpr_slice_sub_type gpr_slice_sub_import; -gpr_slice_sub_no_ref_type gpr_slice_sub_no_ref_import; -gpr_slice_split_tail_type gpr_slice_split_tail_import; -gpr_slice_split_head_type gpr_slice_split_head_import; -gpr_empty_slice_type gpr_empty_slice_import; -gpr_slice_cmp_type gpr_slice_cmp_import; -gpr_slice_str_cmp_type gpr_slice_str_cmp_import; -gpr_slice_buffer_init_type gpr_slice_buffer_init_import; -gpr_slice_buffer_destroy_type gpr_slice_buffer_destroy_import; -gpr_slice_buffer_add_type gpr_slice_buffer_add_import; -gpr_slice_buffer_add_indexed_type gpr_slice_buffer_add_indexed_import; -gpr_slice_buffer_addn_type gpr_slice_buffer_addn_import; -gpr_slice_buffer_tiny_add_type gpr_slice_buffer_tiny_add_import; -gpr_slice_buffer_pop_type gpr_slice_buffer_pop_import; -gpr_slice_buffer_reset_and_unref_type gpr_slice_buffer_reset_and_unref_import; -gpr_slice_buffer_swap_type gpr_slice_buffer_swap_import; -gpr_slice_buffer_move_into_type gpr_slice_buffer_move_into_import; -gpr_slice_buffer_trim_end_type gpr_slice_buffer_trim_end_import; -gpr_slice_buffer_move_first_type gpr_slice_buffer_move_first_import; -gpr_slice_buffer_take_first_type gpr_slice_buffer_take_first_import; -gpr_mu_init_type gpr_mu_init_import; -gpr_mu_destroy_type gpr_mu_destroy_import; -gpr_mu_lock_type gpr_mu_lock_import; -gpr_mu_unlock_type gpr_mu_unlock_import; -gpr_mu_trylock_type gpr_mu_trylock_import; -gpr_cv_init_type gpr_cv_init_import; -gpr_cv_destroy_type gpr_cv_destroy_import; -gpr_cv_wait_type gpr_cv_wait_import; -gpr_cv_signal_type gpr_cv_signal_import; -gpr_cv_broadcast_type gpr_cv_broadcast_import; -gpr_once_init_type gpr_once_init_import; -gpr_event_init_type gpr_event_init_import; -gpr_event_set_type 
gpr_event_set_import; -gpr_event_get_type gpr_event_get_import; -gpr_event_wait_type gpr_event_wait_import; -gpr_ref_init_type gpr_ref_init_import; -gpr_ref_type gpr_ref_import; -gpr_ref_non_zero_type gpr_ref_non_zero_import; -gpr_refn_type gpr_refn_import; -gpr_unref_type gpr_unref_import; -gpr_stats_init_type gpr_stats_init_import; -gpr_stats_inc_type gpr_stats_inc_import; -gpr_stats_read_type gpr_stats_read_import; -gpr_time_0_type gpr_time_0_import; -gpr_inf_future_type gpr_inf_future_import; -gpr_inf_past_type gpr_inf_past_import; -gpr_time_init_type gpr_time_init_import; -gpr_now_type gpr_now_import; -gpr_convert_clock_type_type gpr_convert_clock_type_import; -gpr_time_cmp_type gpr_time_cmp_import; -gpr_time_max_type gpr_time_max_import; -gpr_time_min_type gpr_time_min_import; -gpr_time_add_type gpr_time_add_import; -gpr_time_sub_type gpr_time_sub_import; -gpr_time_from_micros_type gpr_time_from_micros_import; -gpr_time_from_nanos_type gpr_time_from_nanos_import; -gpr_time_from_millis_type gpr_time_from_millis_import; -gpr_time_from_seconds_type gpr_time_from_seconds_import; -gpr_time_from_minutes_type gpr_time_from_minutes_import; -gpr_time_from_hours_type gpr_time_from_hours_import; -gpr_time_to_millis_type gpr_time_to_millis_import; -gpr_time_similar_type gpr_time_similar_import; -gpr_sleep_until_type gpr_sleep_until_import; -gpr_timespec_to_micros_type gpr_timespec_to_micros_import; -gpr_avl_create_type gpr_avl_create_import; -gpr_avl_ref_type gpr_avl_ref_import; -gpr_avl_unref_type gpr_avl_unref_import; -gpr_avl_add_type gpr_avl_add_import; -gpr_avl_remove_type gpr_avl_remove_import; -gpr_avl_get_type gpr_avl_get_import; -gpr_avl_maybe_get_type gpr_avl_maybe_get_import; -gpr_avl_is_empty_type gpr_avl_is_empty_import; -gpr_cmdline_create_type gpr_cmdline_create_import; -gpr_cmdline_add_int_type gpr_cmdline_add_int_import; -gpr_cmdline_add_flag_type gpr_cmdline_add_flag_import; -gpr_cmdline_add_string_type gpr_cmdline_add_string_import; -gpr_cmdline_on_extra_arg_type gpr_cmdline_on_extra_arg_import; -gpr_cmdline_set_survive_failure_type gpr_cmdline_set_survive_failure_import; -gpr_cmdline_parse_type gpr_cmdline_parse_import; -gpr_cmdline_destroy_type gpr_cmdline_destroy_import; -gpr_cmdline_usage_string_type gpr_cmdline_usage_string_import; -gpr_cpu_num_cores_type gpr_cpu_num_cores_import; -gpr_cpu_current_cpu_type gpr_cpu_current_cpu_import; -gpr_histogram_create_type gpr_histogram_create_import; -gpr_histogram_destroy_type gpr_histogram_destroy_import; -gpr_histogram_add_type gpr_histogram_add_import; -gpr_histogram_merge_type gpr_histogram_merge_import; -gpr_histogram_percentile_type gpr_histogram_percentile_import; -gpr_histogram_mean_type gpr_histogram_mean_import; -gpr_histogram_stddev_type gpr_histogram_stddev_import; -gpr_histogram_variance_type gpr_histogram_variance_import; -gpr_histogram_maximum_type gpr_histogram_maximum_import; -gpr_histogram_minimum_type gpr_histogram_minimum_import; -gpr_histogram_count_type gpr_histogram_count_import; -gpr_histogram_sum_type gpr_histogram_sum_import; -gpr_histogram_sum_of_squares_type gpr_histogram_sum_of_squares_import; -gpr_histogram_get_contents_type gpr_histogram_get_contents_import; -gpr_histogram_merge_contents_type gpr_histogram_merge_contents_import; -gpr_join_host_port_type gpr_join_host_port_import; -gpr_split_host_port_type gpr_split_host_port_import; -gpr_format_message_type gpr_format_message_import; -gpr_strdup_type gpr_strdup_import; -gpr_asprintf_type gpr_asprintf_import; -gpr_subprocess_binary_extension_type 
gpr_subprocess_binary_extension_import; -gpr_subprocess_create_type gpr_subprocess_create_import; -gpr_subprocess_destroy_type gpr_subprocess_destroy_import; -gpr_subprocess_join_type gpr_subprocess_join_import; -gpr_subprocess_interrupt_type gpr_subprocess_interrupt_import; -gpr_thd_new_type gpr_thd_new_import; -gpr_thd_options_default_type gpr_thd_options_default_import; -gpr_thd_options_set_detached_type gpr_thd_options_set_detached_import; -gpr_thd_options_set_joinable_type gpr_thd_options_set_joinable_import; -gpr_thd_options_is_detached_type gpr_thd_options_is_detached_import; -gpr_thd_options_is_joinable_type gpr_thd_options_is_joinable_import; -gpr_thd_currentid_type gpr_thd_currentid_import; -gpr_thd_join_type gpr_thd_join_import; - -#ifdef __cplusplus -extern "C" { -#endif /* __cpluslus */ - -void pygrpc_load_imports(HMODULE library) { - census_initialize_import = (census_initialize_type) GetProcAddress(library, "census_initialize"); - census_shutdown_import = (census_shutdown_type) GetProcAddress(library, "census_shutdown"); - census_supported_import = (census_supported_type) GetProcAddress(library, "census_supported"); - census_enabled_import = (census_enabled_type) GetProcAddress(library, "census_enabled"); - census_context_create_import = (census_context_create_type) GetProcAddress(library, "census_context_create"); - census_context_destroy_import = (census_context_destroy_type) GetProcAddress(library, "census_context_destroy"); - census_context_get_status_import = (census_context_get_status_type) GetProcAddress(library, "census_context_get_status"); - census_context_initialize_iterator_import = (census_context_initialize_iterator_type) GetProcAddress(library, "census_context_initialize_iterator"); - census_context_next_tag_import = (census_context_next_tag_type) GetProcAddress(library, "census_context_next_tag"); - census_context_get_tag_import = (census_context_get_tag_type) GetProcAddress(library, "census_context_get_tag"); - census_context_encode_import = (census_context_encode_type) GetProcAddress(library, "census_context_encode"); - census_context_decode_import = (census_context_decode_type) GetProcAddress(library, "census_context_decode"); - census_trace_mask_import = (census_trace_mask_type) GetProcAddress(library, "census_trace_mask"); - census_set_trace_mask_import = (census_set_trace_mask_type) GetProcAddress(library, "census_set_trace_mask"); - census_start_rpc_op_timestamp_import = (census_start_rpc_op_timestamp_type) GetProcAddress(library, "census_start_rpc_op_timestamp"); - census_start_client_rpc_op_import = (census_start_client_rpc_op_type) GetProcAddress(library, "census_start_client_rpc_op"); - census_set_rpc_client_peer_import = (census_set_rpc_client_peer_type) GetProcAddress(library, "census_set_rpc_client_peer"); - census_start_server_rpc_op_import = (census_start_server_rpc_op_type) GetProcAddress(library, "census_start_server_rpc_op"); - census_start_op_import = (census_start_op_type) GetProcAddress(library, "census_start_op"); - census_end_op_import = (census_end_op_type) GetProcAddress(library, "census_end_op"); - census_trace_print_import = (census_trace_print_type) GetProcAddress(library, "census_trace_print"); - census_trace_scan_start_import = (census_trace_scan_start_type) GetProcAddress(library, "census_trace_scan_start"); - census_get_trace_record_import = (census_get_trace_record_type) GetProcAddress(library, "census_get_trace_record"); - census_trace_scan_end_import = (census_trace_scan_end_type) GetProcAddress(library, 
"census_trace_scan_end"); - census_record_values_import = (census_record_values_type) GetProcAddress(library, "census_record_values"); - census_view_create_import = (census_view_create_type) GetProcAddress(library, "census_view_create"); - census_view_delete_import = (census_view_delete_type) GetProcAddress(library, "census_view_delete"); - census_view_metric_import = (census_view_metric_type) GetProcAddress(library, "census_view_metric"); - census_view_naggregations_import = (census_view_naggregations_type) GetProcAddress(library, "census_view_naggregations"); - census_view_tags_import = (census_view_tags_type) GetProcAddress(library, "census_view_tags"); - census_view_aggregrations_import = (census_view_aggregrations_type) GetProcAddress(library, "census_view_aggregrations"); - census_view_get_data_import = (census_view_get_data_type) GetProcAddress(library, "census_view_get_data"); - census_view_reset_import = (census_view_reset_type) GetProcAddress(library, "census_view_reset"); - grpc_compression_algorithm_parse_import = (grpc_compression_algorithm_parse_type) GetProcAddress(library, "grpc_compression_algorithm_parse"); - grpc_compression_algorithm_name_import = (grpc_compression_algorithm_name_type) GetProcAddress(library, "grpc_compression_algorithm_name"); - grpc_compression_algorithm_for_level_import = (grpc_compression_algorithm_for_level_type) GetProcAddress(library, "grpc_compression_algorithm_for_level"); - grpc_compression_options_init_import = (grpc_compression_options_init_type) GetProcAddress(library, "grpc_compression_options_init"); - grpc_compression_options_enable_algorithm_import = (grpc_compression_options_enable_algorithm_type) GetProcAddress(library, "grpc_compression_options_enable_algorithm"); - grpc_compression_options_disable_algorithm_import = (grpc_compression_options_disable_algorithm_type) GetProcAddress(library, "grpc_compression_options_disable_algorithm"); - grpc_compression_options_is_algorithm_enabled_import = (grpc_compression_options_is_algorithm_enabled_type) GetProcAddress(library, "grpc_compression_options_is_algorithm_enabled"); - grpc_metadata_array_init_import = (grpc_metadata_array_init_type) GetProcAddress(library, "grpc_metadata_array_init"); - grpc_metadata_array_destroy_import = (grpc_metadata_array_destroy_type) GetProcAddress(library, "grpc_metadata_array_destroy"); - grpc_call_details_init_import = (grpc_call_details_init_type) GetProcAddress(library, "grpc_call_details_init"); - grpc_call_details_destroy_import = (grpc_call_details_destroy_type) GetProcAddress(library, "grpc_call_details_destroy"); - grpc_register_plugin_import = (grpc_register_plugin_type) GetProcAddress(library, "grpc_register_plugin"); - grpc_init_import = (grpc_init_type) GetProcAddress(library, "grpc_init"); - grpc_shutdown_import = (grpc_shutdown_type) GetProcAddress(library, "grpc_shutdown"); - grpc_version_string_import = (grpc_version_string_type) GetProcAddress(library, "grpc_version_string"); - grpc_completion_queue_create_import = (grpc_completion_queue_create_type) GetProcAddress(library, "grpc_completion_queue_create"); - grpc_completion_queue_next_import = (grpc_completion_queue_next_type) GetProcAddress(library, "grpc_completion_queue_next"); - grpc_completion_queue_pluck_import = (grpc_completion_queue_pluck_type) GetProcAddress(library, "grpc_completion_queue_pluck"); - grpc_completion_queue_shutdown_import = (grpc_completion_queue_shutdown_type) GetProcAddress(library, "grpc_completion_queue_shutdown"); - grpc_completion_queue_destroy_import = 
(grpc_completion_queue_destroy_type) GetProcAddress(library, "grpc_completion_queue_destroy"); - grpc_alarm_create_import = (grpc_alarm_create_type) GetProcAddress(library, "grpc_alarm_create"); - grpc_alarm_cancel_import = (grpc_alarm_cancel_type) GetProcAddress(library, "grpc_alarm_cancel"); - grpc_alarm_destroy_import = (grpc_alarm_destroy_type) GetProcAddress(library, "grpc_alarm_destroy"); - grpc_channel_check_connectivity_state_import = (grpc_channel_check_connectivity_state_type) GetProcAddress(library, "grpc_channel_check_connectivity_state"); - grpc_channel_watch_connectivity_state_import = (grpc_channel_watch_connectivity_state_type) GetProcAddress(library, "grpc_channel_watch_connectivity_state"); - grpc_channel_create_call_import = (grpc_channel_create_call_type) GetProcAddress(library, "grpc_channel_create_call"); - grpc_channel_ping_import = (grpc_channel_ping_type) GetProcAddress(library, "grpc_channel_ping"); - grpc_channel_register_call_import = (grpc_channel_register_call_type) GetProcAddress(library, "grpc_channel_register_call"); - grpc_channel_create_registered_call_import = (grpc_channel_create_registered_call_type) GetProcAddress(library, "grpc_channel_create_registered_call"); - grpc_call_start_batch_import = (grpc_call_start_batch_type) GetProcAddress(library, "grpc_call_start_batch"); - grpc_call_get_peer_import = (grpc_call_get_peer_type) GetProcAddress(library, "grpc_call_get_peer"); - grpc_census_call_set_context_import = (grpc_census_call_set_context_type) GetProcAddress(library, "grpc_census_call_set_context"); - grpc_census_call_get_context_import = (grpc_census_call_get_context_type) GetProcAddress(library, "grpc_census_call_get_context"); - grpc_channel_get_target_import = (grpc_channel_get_target_type) GetProcAddress(library, "grpc_channel_get_target"); - grpc_insecure_channel_create_import = (grpc_insecure_channel_create_type) GetProcAddress(library, "grpc_insecure_channel_create"); - grpc_lame_client_channel_create_import = (grpc_lame_client_channel_create_type) GetProcAddress(library, "grpc_lame_client_channel_create"); - grpc_channel_destroy_import = (grpc_channel_destroy_type) GetProcAddress(library, "grpc_channel_destroy"); - grpc_call_cancel_import = (grpc_call_cancel_type) GetProcAddress(library, "grpc_call_cancel"); - grpc_call_cancel_with_status_import = (grpc_call_cancel_with_status_type) GetProcAddress(library, "grpc_call_cancel_with_status"); - grpc_call_destroy_import = (grpc_call_destroy_type) GetProcAddress(library, "grpc_call_destroy"); - grpc_server_request_call_import = (grpc_server_request_call_type) GetProcAddress(library, "grpc_server_request_call"); - grpc_server_register_method_import = (grpc_server_register_method_type) GetProcAddress(library, "grpc_server_register_method"); - grpc_server_request_registered_call_import = (grpc_server_request_registered_call_type) GetProcAddress(library, "grpc_server_request_registered_call"); - grpc_server_create_import = (grpc_server_create_type) GetProcAddress(library, "grpc_server_create"); - grpc_server_register_completion_queue_import = (grpc_server_register_completion_queue_type) GetProcAddress(library, "grpc_server_register_completion_queue"); - grpc_server_register_non_listening_completion_queue_import = (grpc_server_register_non_listening_completion_queue_type) GetProcAddress(library, "grpc_server_register_non_listening_completion_queue"); - grpc_server_add_insecure_http2_port_import = (grpc_server_add_insecure_http2_port_type) GetProcAddress(library, 
"grpc_server_add_insecure_http2_port"); - grpc_server_start_import = (grpc_server_start_type) GetProcAddress(library, "grpc_server_start"); - grpc_server_shutdown_and_notify_import = (grpc_server_shutdown_and_notify_type) GetProcAddress(library, "grpc_server_shutdown_and_notify"); - grpc_server_cancel_all_calls_import = (grpc_server_cancel_all_calls_type) GetProcAddress(library, "grpc_server_cancel_all_calls"); - grpc_server_destroy_import = (grpc_server_destroy_type) GetProcAddress(library, "grpc_server_destroy"); - grpc_tracer_set_enabled_import = (grpc_tracer_set_enabled_type) GetProcAddress(library, "grpc_tracer_set_enabled"); - grpc_header_key_is_legal_import = (grpc_header_key_is_legal_type) GetProcAddress(library, "grpc_header_key_is_legal"); - grpc_header_nonbin_value_is_legal_import = (grpc_header_nonbin_value_is_legal_type) GetProcAddress(library, "grpc_header_nonbin_value_is_legal"); - grpc_is_binary_header_import = (grpc_is_binary_header_type) GetProcAddress(library, "grpc_is_binary_header"); - grpc_call_error_to_string_import = (grpc_call_error_to_string_type) GetProcAddress(library, "grpc_call_error_to_string"); - grpc_insecure_channel_create_from_fd_import = (grpc_insecure_channel_create_from_fd_type) GetProcAddress(library, "grpc_insecure_channel_create_from_fd"); - grpc_server_add_insecure_channel_from_fd_import = (grpc_server_add_insecure_channel_from_fd_type) GetProcAddress(library, "grpc_server_add_insecure_channel_from_fd"); - grpc_use_signal_import = (grpc_use_signal_type) GetProcAddress(library, "grpc_use_signal"); - grpc_auth_property_iterator_next_import = (grpc_auth_property_iterator_next_type) GetProcAddress(library, "grpc_auth_property_iterator_next"); - grpc_auth_context_property_iterator_import = (grpc_auth_context_property_iterator_type) GetProcAddress(library, "grpc_auth_context_property_iterator"); - grpc_auth_context_peer_identity_import = (grpc_auth_context_peer_identity_type) GetProcAddress(library, "grpc_auth_context_peer_identity"); - grpc_auth_context_find_properties_by_name_import = (grpc_auth_context_find_properties_by_name_type) GetProcAddress(library, "grpc_auth_context_find_properties_by_name"); - grpc_auth_context_peer_identity_property_name_import = (grpc_auth_context_peer_identity_property_name_type) GetProcAddress(library, "grpc_auth_context_peer_identity_property_name"); - grpc_auth_context_peer_is_authenticated_import = (grpc_auth_context_peer_is_authenticated_type) GetProcAddress(library, "grpc_auth_context_peer_is_authenticated"); - grpc_call_auth_context_import = (grpc_call_auth_context_type) GetProcAddress(library, "grpc_call_auth_context"); - grpc_auth_context_release_import = (grpc_auth_context_release_type) GetProcAddress(library, "grpc_auth_context_release"); - grpc_auth_context_add_property_import = (grpc_auth_context_add_property_type) GetProcAddress(library, "grpc_auth_context_add_property"); - grpc_auth_context_add_cstring_property_import = (grpc_auth_context_add_cstring_property_type) GetProcAddress(library, "grpc_auth_context_add_cstring_property"); - grpc_auth_context_set_peer_identity_property_name_import = (grpc_auth_context_set_peer_identity_property_name_type) GetProcAddress(library, "grpc_auth_context_set_peer_identity_property_name"); - grpc_channel_credentials_release_import = (grpc_channel_credentials_release_type) GetProcAddress(library, "grpc_channel_credentials_release"); - grpc_google_default_credentials_create_import = (grpc_google_default_credentials_create_type) GetProcAddress(library, 
"grpc_google_default_credentials_create"); - grpc_set_ssl_roots_override_callback_import = (grpc_set_ssl_roots_override_callback_type) GetProcAddress(library, "grpc_set_ssl_roots_override_callback"); - grpc_ssl_credentials_create_import = (grpc_ssl_credentials_create_type) GetProcAddress(library, "grpc_ssl_credentials_create"); - grpc_call_credentials_release_import = (grpc_call_credentials_release_type) GetProcAddress(library, "grpc_call_credentials_release"); - grpc_composite_channel_credentials_create_import = (grpc_composite_channel_credentials_create_type) GetProcAddress(library, "grpc_composite_channel_credentials_create"); - grpc_composite_call_credentials_create_import = (grpc_composite_call_credentials_create_type) GetProcAddress(library, "grpc_composite_call_credentials_create"); - grpc_google_compute_engine_credentials_create_import = (grpc_google_compute_engine_credentials_create_type) GetProcAddress(library, "grpc_google_compute_engine_credentials_create"); - grpc_max_auth_token_lifetime_import = (grpc_max_auth_token_lifetime_type) GetProcAddress(library, "grpc_max_auth_token_lifetime"); - grpc_service_account_jwt_access_credentials_create_import = (grpc_service_account_jwt_access_credentials_create_type) GetProcAddress(library, "grpc_service_account_jwt_access_credentials_create"); - grpc_google_refresh_token_credentials_create_import = (grpc_google_refresh_token_credentials_create_type) GetProcAddress(library, "grpc_google_refresh_token_credentials_create"); - grpc_access_token_credentials_create_import = (grpc_access_token_credentials_create_type) GetProcAddress(library, "grpc_access_token_credentials_create"); - grpc_google_iam_credentials_create_import = (grpc_google_iam_credentials_create_type) GetProcAddress(library, "grpc_google_iam_credentials_create"); - grpc_metadata_credentials_create_from_plugin_import = (grpc_metadata_credentials_create_from_plugin_type) GetProcAddress(library, "grpc_metadata_credentials_create_from_plugin"); - grpc_secure_channel_create_import = (grpc_secure_channel_create_type) GetProcAddress(library, "grpc_secure_channel_create"); - grpc_server_credentials_release_import = (grpc_server_credentials_release_type) GetProcAddress(library, "grpc_server_credentials_release"); - grpc_ssl_server_credentials_create_import = (grpc_ssl_server_credentials_create_type) GetProcAddress(library, "grpc_ssl_server_credentials_create"); - grpc_ssl_server_credentials_create_ex_import = (grpc_ssl_server_credentials_create_ex_type) GetProcAddress(library, "grpc_ssl_server_credentials_create_ex"); - grpc_server_add_secure_http2_port_import = (grpc_server_add_secure_http2_port_type) GetProcAddress(library, "grpc_server_add_secure_http2_port"); - grpc_call_set_credentials_import = (grpc_call_set_credentials_type) GetProcAddress(library, "grpc_call_set_credentials"); - grpc_server_credentials_set_auth_metadata_processor_import = (grpc_server_credentials_set_auth_metadata_processor_type) GetProcAddress(library, "grpc_server_credentials_set_auth_metadata_processor"); - gpr_malloc_import = (gpr_malloc_type) GetProcAddress(library, "gpr_malloc"); - gpr_free_import = (gpr_free_type) GetProcAddress(library, "gpr_free"); - gpr_realloc_import = (gpr_realloc_type) GetProcAddress(library, "gpr_realloc"); - gpr_malloc_aligned_import = (gpr_malloc_aligned_type) GetProcAddress(library, "gpr_malloc_aligned"); - gpr_free_aligned_import = (gpr_free_aligned_type) GetProcAddress(library, "gpr_free_aligned"); - gpr_set_allocation_functions_import = (gpr_set_allocation_functions_type) 
GetProcAddress(library, "gpr_set_allocation_functions"); - gpr_get_allocation_functions_import = (gpr_get_allocation_functions_type) GetProcAddress(library, "gpr_get_allocation_functions"); - grpc_raw_byte_buffer_create_import = (grpc_raw_byte_buffer_create_type) GetProcAddress(library, "grpc_raw_byte_buffer_create"); - grpc_raw_compressed_byte_buffer_create_import = (grpc_raw_compressed_byte_buffer_create_type) GetProcAddress(library, "grpc_raw_compressed_byte_buffer_create"); - grpc_byte_buffer_copy_import = (grpc_byte_buffer_copy_type) GetProcAddress(library, "grpc_byte_buffer_copy"); - grpc_byte_buffer_length_import = (grpc_byte_buffer_length_type) GetProcAddress(library, "grpc_byte_buffer_length"); - grpc_byte_buffer_destroy_import = (grpc_byte_buffer_destroy_type) GetProcAddress(library, "grpc_byte_buffer_destroy"); - grpc_byte_buffer_reader_init_import = (grpc_byte_buffer_reader_init_type) GetProcAddress(library, "grpc_byte_buffer_reader_init"); - grpc_byte_buffer_reader_destroy_import = (grpc_byte_buffer_reader_destroy_type) GetProcAddress(library, "grpc_byte_buffer_reader_destroy"); - grpc_byte_buffer_reader_next_import = (grpc_byte_buffer_reader_next_type) GetProcAddress(library, "grpc_byte_buffer_reader_next"); - grpc_byte_buffer_reader_readall_import = (grpc_byte_buffer_reader_readall_type) GetProcAddress(library, "grpc_byte_buffer_reader_readall"); - grpc_raw_byte_buffer_from_reader_import = (grpc_raw_byte_buffer_from_reader_type) GetProcAddress(library, "grpc_raw_byte_buffer_from_reader"); - gpr_log_import = (gpr_log_type) GetProcAddress(library, "gpr_log"); - gpr_log_message_import = (gpr_log_message_type) GetProcAddress(library, "gpr_log_message"); - gpr_set_log_verbosity_import = (gpr_set_log_verbosity_type) GetProcAddress(library, "gpr_set_log_verbosity"); - gpr_log_verbosity_init_import = (gpr_log_verbosity_init_type) GetProcAddress(library, "gpr_log_verbosity_init"); - gpr_set_log_function_import = (gpr_set_log_function_type) GetProcAddress(library, "gpr_set_log_function"); - gpr_slice_ref_import = (gpr_slice_ref_type) GetProcAddress(library, "gpr_slice_ref"); - gpr_slice_unref_import = (gpr_slice_unref_type) GetProcAddress(library, "gpr_slice_unref"); - gpr_slice_new_import = (gpr_slice_new_type) GetProcAddress(library, "gpr_slice_new"); - gpr_slice_new_with_len_import = (gpr_slice_new_with_len_type) GetProcAddress(library, "gpr_slice_new_with_len"); - gpr_slice_malloc_import = (gpr_slice_malloc_type) GetProcAddress(library, "gpr_slice_malloc"); - gpr_slice_from_copied_string_import = (gpr_slice_from_copied_string_type) GetProcAddress(library, "gpr_slice_from_copied_string"); - gpr_slice_from_copied_buffer_import = (gpr_slice_from_copied_buffer_type) GetProcAddress(library, "gpr_slice_from_copied_buffer"); - gpr_slice_from_static_string_import = (gpr_slice_from_static_string_type) GetProcAddress(library, "gpr_slice_from_static_string"); - gpr_slice_sub_import = (gpr_slice_sub_type) GetProcAddress(library, "gpr_slice_sub"); - gpr_slice_sub_no_ref_import = (gpr_slice_sub_no_ref_type) GetProcAddress(library, "gpr_slice_sub_no_ref"); - gpr_slice_split_tail_import = (gpr_slice_split_tail_type) GetProcAddress(library, "gpr_slice_split_tail"); - gpr_slice_split_head_import = (gpr_slice_split_head_type) GetProcAddress(library, "gpr_slice_split_head"); - gpr_empty_slice_import = (gpr_empty_slice_type) GetProcAddress(library, "gpr_empty_slice"); - gpr_slice_cmp_import = (gpr_slice_cmp_type) GetProcAddress(library, "gpr_slice_cmp"); - gpr_slice_str_cmp_import = 
(gpr_slice_str_cmp_type) GetProcAddress(library, "gpr_slice_str_cmp"); - gpr_slice_buffer_init_import = (gpr_slice_buffer_init_type) GetProcAddress(library, "gpr_slice_buffer_init"); - gpr_slice_buffer_destroy_import = (gpr_slice_buffer_destroy_type) GetProcAddress(library, "gpr_slice_buffer_destroy"); - gpr_slice_buffer_add_import = (gpr_slice_buffer_add_type) GetProcAddress(library, "gpr_slice_buffer_add"); - gpr_slice_buffer_add_indexed_import = (gpr_slice_buffer_add_indexed_type) GetProcAddress(library, "gpr_slice_buffer_add_indexed"); - gpr_slice_buffer_addn_import = (gpr_slice_buffer_addn_type) GetProcAddress(library, "gpr_slice_buffer_addn"); - gpr_slice_buffer_tiny_add_import = (gpr_slice_buffer_tiny_add_type) GetProcAddress(library, "gpr_slice_buffer_tiny_add"); - gpr_slice_buffer_pop_import = (gpr_slice_buffer_pop_type) GetProcAddress(library, "gpr_slice_buffer_pop"); - gpr_slice_buffer_reset_and_unref_import = (gpr_slice_buffer_reset_and_unref_type) GetProcAddress(library, "gpr_slice_buffer_reset_and_unref"); - gpr_slice_buffer_swap_import = (gpr_slice_buffer_swap_type) GetProcAddress(library, "gpr_slice_buffer_swap"); - gpr_slice_buffer_move_into_import = (gpr_slice_buffer_move_into_type) GetProcAddress(library, "gpr_slice_buffer_move_into"); - gpr_slice_buffer_trim_end_import = (gpr_slice_buffer_trim_end_type) GetProcAddress(library, "gpr_slice_buffer_trim_end"); - gpr_slice_buffer_move_first_import = (gpr_slice_buffer_move_first_type) GetProcAddress(library, "gpr_slice_buffer_move_first"); - gpr_slice_buffer_take_first_import = (gpr_slice_buffer_take_first_type) GetProcAddress(library, "gpr_slice_buffer_take_first"); - gpr_mu_init_import = (gpr_mu_init_type) GetProcAddress(library, "gpr_mu_init"); - gpr_mu_destroy_import = (gpr_mu_destroy_type) GetProcAddress(library, "gpr_mu_destroy"); - gpr_mu_lock_import = (gpr_mu_lock_type) GetProcAddress(library, "gpr_mu_lock"); - gpr_mu_unlock_import = (gpr_mu_unlock_type) GetProcAddress(library, "gpr_mu_unlock"); - gpr_mu_trylock_import = (gpr_mu_trylock_type) GetProcAddress(library, "gpr_mu_trylock"); - gpr_cv_init_import = (gpr_cv_init_type) GetProcAddress(library, "gpr_cv_init"); - gpr_cv_destroy_import = (gpr_cv_destroy_type) GetProcAddress(library, "gpr_cv_destroy"); - gpr_cv_wait_import = (gpr_cv_wait_type) GetProcAddress(library, "gpr_cv_wait"); - gpr_cv_signal_import = (gpr_cv_signal_type) GetProcAddress(library, "gpr_cv_signal"); - gpr_cv_broadcast_import = (gpr_cv_broadcast_type) GetProcAddress(library, "gpr_cv_broadcast"); - gpr_once_init_import = (gpr_once_init_type) GetProcAddress(library, "gpr_once_init"); - gpr_event_init_import = (gpr_event_init_type) GetProcAddress(library, "gpr_event_init"); - gpr_event_set_import = (gpr_event_set_type) GetProcAddress(library, "gpr_event_set"); - gpr_event_get_import = (gpr_event_get_type) GetProcAddress(library, "gpr_event_get"); - gpr_event_wait_import = (gpr_event_wait_type) GetProcAddress(library, "gpr_event_wait"); - gpr_ref_init_import = (gpr_ref_init_type) GetProcAddress(library, "gpr_ref_init"); - gpr_ref_import = (gpr_ref_type) GetProcAddress(library, "gpr_ref"); - gpr_ref_non_zero_import = (gpr_ref_non_zero_type) GetProcAddress(library, "gpr_ref_non_zero"); - gpr_refn_import = (gpr_refn_type) GetProcAddress(library, "gpr_refn"); - gpr_unref_import = (gpr_unref_type) GetProcAddress(library, "gpr_unref"); - gpr_stats_init_import = (gpr_stats_init_type) GetProcAddress(library, "gpr_stats_init"); - gpr_stats_inc_import = (gpr_stats_inc_type) GetProcAddress(library, 
"gpr_stats_inc"); - gpr_stats_read_import = (gpr_stats_read_type) GetProcAddress(library, "gpr_stats_read"); - gpr_time_0_import = (gpr_time_0_type) GetProcAddress(library, "gpr_time_0"); - gpr_inf_future_import = (gpr_inf_future_type) GetProcAddress(library, "gpr_inf_future"); - gpr_inf_past_import = (gpr_inf_past_type) GetProcAddress(library, "gpr_inf_past"); - gpr_time_init_import = (gpr_time_init_type) GetProcAddress(library, "gpr_time_init"); - gpr_now_import = (gpr_now_type) GetProcAddress(library, "gpr_now"); - gpr_convert_clock_type_import = (gpr_convert_clock_type_type) GetProcAddress(library, "gpr_convert_clock_type"); - gpr_time_cmp_import = (gpr_time_cmp_type) GetProcAddress(library, "gpr_time_cmp"); - gpr_time_max_import = (gpr_time_max_type) GetProcAddress(library, "gpr_time_max"); - gpr_time_min_import = (gpr_time_min_type) GetProcAddress(library, "gpr_time_min"); - gpr_time_add_import = (gpr_time_add_type) GetProcAddress(library, "gpr_time_add"); - gpr_time_sub_import = (gpr_time_sub_type) GetProcAddress(library, "gpr_time_sub"); - gpr_time_from_micros_import = (gpr_time_from_micros_type) GetProcAddress(library, "gpr_time_from_micros"); - gpr_time_from_nanos_import = (gpr_time_from_nanos_type) GetProcAddress(library, "gpr_time_from_nanos"); - gpr_time_from_millis_import = (gpr_time_from_millis_type) GetProcAddress(library, "gpr_time_from_millis"); - gpr_time_from_seconds_import = (gpr_time_from_seconds_type) GetProcAddress(library, "gpr_time_from_seconds"); - gpr_time_from_minutes_import = (gpr_time_from_minutes_type) GetProcAddress(library, "gpr_time_from_minutes"); - gpr_time_from_hours_import = (gpr_time_from_hours_type) GetProcAddress(library, "gpr_time_from_hours"); - gpr_time_to_millis_import = (gpr_time_to_millis_type) GetProcAddress(library, "gpr_time_to_millis"); - gpr_time_similar_import = (gpr_time_similar_type) GetProcAddress(library, "gpr_time_similar"); - gpr_sleep_until_import = (gpr_sleep_until_type) GetProcAddress(library, "gpr_sleep_until"); - gpr_timespec_to_micros_import = (gpr_timespec_to_micros_type) GetProcAddress(library, "gpr_timespec_to_micros"); - gpr_avl_create_import = (gpr_avl_create_type) GetProcAddress(library, "gpr_avl_create"); - gpr_avl_ref_import = (gpr_avl_ref_type) GetProcAddress(library, "gpr_avl_ref"); - gpr_avl_unref_import = (gpr_avl_unref_type) GetProcAddress(library, "gpr_avl_unref"); - gpr_avl_add_import = (gpr_avl_add_type) GetProcAddress(library, "gpr_avl_add"); - gpr_avl_remove_import = (gpr_avl_remove_type) GetProcAddress(library, "gpr_avl_remove"); - gpr_avl_get_import = (gpr_avl_get_type) GetProcAddress(library, "gpr_avl_get"); - gpr_avl_maybe_get_import = (gpr_avl_maybe_get_type) GetProcAddress(library, "gpr_avl_maybe_get"); - gpr_avl_is_empty_import = (gpr_avl_is_empty_type) GetProcAddress(library, "gpr_avl_is_empty"); - gpr_cmdline_create_import = (gpr_cmdline_create_type) GetProcAddress(library, "gpr_cmdline_create"); - gpr_cmdline_add_int_import = (gpr_cmdline_add_int_type) GetProcAddress(library, "gpr_cmdline_add_int"); - gpr_cmdline_add_flag_import = (gpr_cmdline_add_flag_type) GetProcAddress(library, "gpr_cmdline_add_flag"); - gpr_cmdline_add_string_import = (gpr_cmdline_add_string_type) GetProcAddress(library, "gpr_cmdline_add_string"); - gpr_cmdline_on_extra_arg_import = (gpr_cmdline_on_extra_arg_type) GetProcAddress(library, "gpr_cmdline_on_extra_arg"); - gpr_cmdline_set_survive_failure_import = (gpr_cmdline_set_survive_failure_type) GetProcAddress(library, "gpr_cmdline_set_survive_failure"); - 
gpr_cmdline_parse_import = (gpr_cmdline_parse_type) GetProcAddress(library, "gpr_cmdline_parse"); - gpr_cmdline_destroy_import = (gpr_cmdline_destroy_type) GetProcAddress(library, "gpr_cmdline_destroy"); - gpr_cmdline_usage_string_import = (gpr_cmdline_usage_string_type) GetProcAddress(library, "gpr_cmdline_usage_string"); - gpr_cpu_num_cores_import = (gpr_cpu_num_cores_type) GetProcAddress(library, "gpr_cpu_num_cores"); - gpr_cpu_current_cpu_import = (gpr_cpu_current_cpu_type) GetProcAddress(library, "gpr_cpu_current_cpu"); - gpr_histogram_create_import = (gpr_histogram_create_type) GetProcAddress(library, "gpr_histogram_create"); - gpr_histogram_destroy_import = (gpr_histogram_destroy_type) GetProcAddress(library, "gpr_histogram_destroy"); - gpr_histogram_add_import = (gpr_histogram_add_type) GetProcAddress(library, "gpr_histogram_add"); - gpr_histogram_merge_import = (gpr_histogram_merge_type) GetProcAddress(library, "gpr_histogram_merge"); - gpr_histogram_percentile_import = (gpr_histogram_percentile_type) GetProcAddress(library, "gpr_histogram_percentile"); - gpr_histogram_mean_import = (gpr_histogram_mean_type) GetProcAddress(library, "gpr_histogram_mean"); - gpr_histogram_stddev_import = (gpr_histogram_stddev_type) GetProcAddress(library, "gpr_histogram_stddev"); - gpr_histogram_variance_import = (gpr_histogram_variance_type) GetProcAddress(library, "gpr_histogram_variance"); - gpr_histogram_maximum_import = (gpr_histogram_maximum_type) GetProcAddress(library, "gpr_histogram_maximum"); - gpr_histogram_minimum_import = (gpr_histogram_minimum_type) GetProcAddress(library, "gpr_histogram_minimum"); - gpr_histogram_count_import = (gpr_histogram_count_type) GetProcAddress(library, "gpr_histogram_count"); - gpr_histogram_sum_import = (gpr_histogram_sum_type) GetProcAddress(library, "gpr_histogram_sum"); - gpr_histogram_sum_of_squares_import = (gpr_histogram_sum_of_squares_type) GetProcAddress(library, "gpr_histogram_sum_of_squares"); - gpr_histogram_get_contents_import = (gpr_histogram_get_contents_type) GetProcAddress(library, "gpr_histogram_get_contents"); - gpr_histogram_merge_contents_import = (gpr_histogram_merge_contents_type) GetProcAddress(library, "gpr_histogram_merge_contents"); - gpr_join_host_port_import = (gpr_join_host_port_type) GetProcAddress(library, "gpr_join_host_port"); - gpr_split_host_port_import = (gpr_split_host_port_type) GetProcAddress(library, "gpr_split_host_port"); - gpr_format_message_import = (gpr_format_message_type) GetProcAddress(library, "gpr_format_message"); - gpr_strdup_import = (gpr_strdup_type) GetProcAddress(library, "gpr_strdup"); - gpr_asprintf_import = (gpr_asprintf_type) GetProcAddress(library, "gpr_asprintf"); - gpr_subprocess_binary_extension_import = (gpr_subprocess_binary_extension_type) GetProcAddress(library, "gpr_subprocess_binary_extension"); - gpr_subprocess_create_import = (gpr_subprocess_create_type) GetProcAddress(library, "gpr_subprocess_create"); - gpr_subprocess_destroy_import = (gpr_subprocess_destroy_type) GetProcAddress(library, "gpr_subprocess_destroy"); - gpr_subprocess_join_import = (gpr_subprocess_join_type) GetProcAddress(library, "gpr_subprocess_join"); - gpr_subprocess_interrupt_import = (gpr_subprocess_interrupt_type) GetProcAddress(library, "gpr_subprocess_interrupt"); - gpr_thd_new_import = (gpr_thd_new_type) GetProcAddress(library, "gpr_thd_new"); - gpr_thd_options_default_import = (gpr_thd_options_default_type) GetProcAddress(library, "gpr_thd_options_default"); - gpr_thd_options_set_detached_import = 
(gpr_thd_options_set_detached_type) GetProcAddress(library, "gpr_thd_options_set_detached"); - gpr_thd_options_set_joinable_import = (gpr_thd_options_set_joinable_type) GetProcAddress(library, "gpr_thd_options_set_joinable"); - gpr_thd_options_is_detached_import = (gpr_thd_options_is_detached_type) GetProcAddress(library, "gpr_thd_options_is_detached"); - gpr_thd_options_is_joinable_import = (gpr_thd_options_is_joinable_type) GetProcAddress(library, "gpr_thd_options_is_joinable"); - gpr_thd_currentid_import = (gpr_thd_currentid_type) GetProcAddress(library, "gpr_thd_currentid"); - gpr_thd_join_import = (gpr_thd_join_type) GetProcAddress(library, "gpr_thd_join"); -} - -#ifdef __cplusplus -} -#endif /* __cpluslus */ - -#endif /* !GPR_WINDOWS */ diff --git a/src/python/grpcio/grpc/_cython/imports.generated.h b/src/python/grpcio/grpc/_cython/imports.generated.h index b3e341fe25..8e5c9a8ce2 100644 --- a/src/python/grpcio/grpc/_cython/imports.generated.h +++ b/src/python/grpcio/grpc/_cython/imports.generated.h @@ -31,860 +31,12 @@ * */ +/* TODO(atash) remove cruft */ #ifndef PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ #define PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ #include <grpc/support/port_platform.h> -#ifdef GPR_WINDOWS - -#include <windows.h> - -#include <grpc/census.h> -#include <grpc/compression.h> -#include <grpc/grpc.h> -#include <grpc/grpc_posix.h> -#include <grpc/grpc_security.h> -#include <grpc/impl/codegen/alloc.h> -#include <grpc/impl/codegen/byte_buffer.h> -#include <grpc/impl/codegen/log.h> -#include <grpc/impl/codegen/slice.h> -#include <grpc/impl/codegen/slice_buffer.h> -#include <grpc/impl/codegen/sync.h> -#include <grpc/impl/codegen/time.h> -#include <grpc/support/avl.h> -#include <grpc/support/cmdline.h> -#include <grpc/support/cpu.h> -#include <grpc/support/histogram.h> -#include <grpc/support/host_port.h> -#include <grpc/support/log_windows.h> -#include <grpc/support/string_util.h> -#include <grpc/support/subprocess.h> -#include <grpc/support/thd.h> - -typedef int(*census_initialize_type)(int features); -extern census_initialize_type census_initialize_import; -#define census_initialize census_initialize_import -typedef void(*census_shutdown_type)(void); -extern census_shutdown_type census_shutdown_import; -#define census_shutdown census_shutdown_import -typedef int(*census_supported_type)(void); -extern census_supported_type census_supported_import; -#define census_supported census_supported_import -typedef int(*census_enabled_type)(void); -extern census_enabled_type census_enabled_import; -#define census_enabled census_enabled_import -typedef census_context *(*census_context_create_type)(const census_context *base, const census_tag *tags, int ntags, census_context_status const **status); -extern census_context_create_type census_context_create_import; -#define census_context_create census_context_create_import -typedef void(*census_context_destroy_type)(census_context *context); -extern census_context_destroy_type census_context_destroy_import; -#define census_context_destroy census_context_destroy_import -typedef const census_context_status *(*census_context_get_status_type)(const census_context *context); -extern census_context_get_status_type census_context_get_status_import; -#define census_context_get_status census_context_get_status_import -typedef void(*census_context_initialize_iterator_type)(const census_context *context, census_context_iterator *iterator); -extern census_context_initialize_iterator_type census_context_initialize_iterator_import; -#define 
census_context_initialize_iterator census_context_initialize_iterator_import -typedef int(*census_context_next_tag_type)(census_context_iterator *iterator, census_tag *tag); -extern census_context_next_tag_type census_context_next_tag_import; -#define census_context_next_tag census_context_next_tag_import -typedef int(*census_context_get_tag_type)(const census_context *context, const char *key, census_tag *tag); -extern census_context_get_tag_type census_context_get_tag_import; -#define census_context_get_tag census_context_get_tag_import -typedef size_t(*census_context_encode_type)(const census_context *context, char *buffer, size_t buf_size); -extern census_context_encode_type census_context_encode_import; -#define census_context_encode census_context_encode_import -typedef census_context *(*census_context_decode_type)(const char *buffer, size_t size); -extern census_context_decode_type census_context_decode_import; -#define census_context_decode census_context_decode_import -typedef int(*census_trace_mask_type)(const census_context *context); -extern census_trace_mask_type census_trace_mask_import; -#define census_trace_mask census_trace_mask_import -typedef void(*census_set_trace_mask_type)(int trace_mask); -extern census_set_trace_mask_type census_set_trace_mask_import; -#define census_set_trace_mask census_set_trace_mask_import -typedef census_timestamp(*census_start_rpc_op_timestamp_type)(void); -extern census_start_rpc_op_timestamp_type census_start_rpc_op_timestamp_import; -#define census_start_rpc_op_timestamp census_start_rpc_op_timestamp_import -typedef census_context *(*census_start_client_rpc_op_type)(const census_context *context, int64_t rpc_name_id, const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask, const census_timestamp *start_time); -extern census_start_client_rpc_op_type census_start_client_rpc_op_import; -#define census_start_client_rpc_op census_start_client_rpc_op_import -typedef void(*census_set_rpc_client_peer_type)(census_context *context, const char *peer); -extern census_set_rpc_client_peer_type census_set_rpc_client_peer_import; -#define census_set_rpc_client_peer census_set_rpc_client_peer_import -typedef census_context *(*census_start_server_rpc_op_type)(const char *buffer, int64_t rpc_name_id, const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask, census_timestamp *start_time); -extern census_start_server_rpc_op_type census_start_server_rpc_op_import; -#define census_start_server_rpc_op census_start_server_rpc_op_import -typedef census_context *(*census_start_op_type)(census_context *context, const char *family, const char *name, int trace_mask); -extern census_start_op_type census_start_op_import; -#define census_start_op census_start_op_import -typedef void(*census_end_op_type)(census_context *context, int status); -extern census_end_op_type census_end_op_import; -#define census_end_op census_end_op_import -typedef void(*census_trace_print_type)(census_context *context, uint32_t type, const char *buffer, size_t n); -extern census_trace_print_type census_trace_print_import; -#define census_trace_print census_trace_print_import -typedef int(*census_trace_scan_start_type)(int consume); -extern census_trace_scan_start_type census_trace_scan_start_import; -#define census_trace_scan_start census_trace_scan_start_import -typedef int(*census_get_trace_record_type)(census_trace_record *trace_record); -extern census_get_trace_record_type census_get_trace_record_import; -#define census_get_trace_record 
census_get_trace_record_import -typedef void(*census_trace_scan_end_type)(); -extern census_trace_scan_end_type census_trace_scan_end_import; -#define census_trace_scan_end census_trace_scan_end_import -typedef void(*census_record_values_type)(census_context *context, census_value *values, size_t nvalues); -extern census_record_values_type census_record_values_import; -#define census_record_values census_record_values_import -typedef census_view *(*census_view_create_type)(uint32_t metric_id, const census_context *tags, const census_aggregation *aggregations, size_t naggregations); -extern census_view_create_type census_view_create_import; -#define census_view_create census_view_create_import -typedef void(*census_view_delete_type)(census_view *view); -extern census_view_delete_type census_view_delete_import; -#define census_view_delete census_view_delete_import -typedef size_t(*census_view_metric_type)(const census_view *view); -extern census_view_metric_type census_view_metric_import; -#define census_view_metric census_view_metric_import -typedef size_t(*census_view_naggregations_type)(const census_view *view); -extern census_view_naggregations_type census_view_naggregations_import; -#define census_view_naggregations census_view_naggregations_import -typedef const census_context *(*census_view_tags_type)(const census_view *view); -extern census_view_tags_type census_view_tags_import; -#define census_view_tags census_view_tags_import -typedef const census_aggregation *(*census_view_aggregrations_type)(const census_view *view); -extern census_view_aggregrations_type census_view_aggregrations_import; -#define census_view_aggregrations census_view_aggregrations_import -typedef const census_view_data *(*census_view_get_data_type)(const census_view *view); -extern census_view_get_data_type census_view_get_data_import; -#define census_view_get_data census_view_get_data_import -typedef void(*census_view_reset_type)(census_view *view); -extern census_view_reset_type census_view_reset_import; -#define census_view_reset census_view_reset_import -typedef int(*grpc_compression_algorithm_parse_type)(const char *name, size_t name_length, grpc_compression_algorithm *algorithm); -extern grpc_compression_algorithm_parse_type grpc_compression_algorithm_parse_import; -#define grpc_compression_algorithm_parse grpc_compression_algorithm_parse_import -typedef int(*grpc_compression_algorithm_name_type)(grpc_compression_algorithm algorithm, char **name); -extern grpc_compression_algorithm_name_type grpc_compression_algorithm_name_import; -#define grpc_compression_algorithm_name grpc_compression_algorithm_name_import -typedef grpc_compression_algorithm(*grpc_compression_algorithm_for_level_type)(grpc_compression_level level, uint32_t accepted_encodings); -extern grpc_compression_algorithm_for_level_type grpc_compression_algorithm_for_level_import; -#define grpc_compression_algorithm_for_level grpc_compression_algorithm_for_level_import -typedef void(*grpc_compression_options_init_type)(grpc_compression_options *opts); -extern grpc_compression_options_init_type grpc_compression_options_init_import; -#define grpc_compression_options_init grpc_compression_options_init_import -typedef void(*grpc_compression_options_enable_algorithm_type)(grpc_compression_options *opts, grpc_compression_algorithm algorithm); -extern grpc_compression_options_enable_algorithm_type grpc_compression_options_enable_algorithm_import; -#define grpc_compression_options_enable_algorithm grpc_compression_options_enable_algorithm_import -typedef 
void(*grpc_compression_options_disable_algorithm_type)(grpc_compression_options *opts, grpc_compression_algorithm algorithm); -extern grpc_compression_options_disable_algorithm_type grpc_compression_options_disable_algorithm_import; -#define grpc_compression_options_disable_algorithm grpc_compression_options_disable_algorithm_import -typedef int(*grpc_compression_options_is_algorithm_enabled_type)(const grpc_compression_options *opts, grpc_compression_algorithm algorithm); -extern grpc_compression_options_is_algorithm_enabled_type grpc_compression_options_is_algorithm_enabled_import; -#define grpc_compression_options_is_algorithm_enabled grpc_compression_options_is_algorithm_enabled_import -typedef void(*grpc_metadata_array_init_type)(grpc_metadata_array *array); -extern grpc_metadata_array_init_type grpc_metadata_array_init_import; -#define grpc_metadata_array_init grpc_metadata_array_init_import -typedef void(*grpc_metadata_array_destroy_type)(grpc_metadata_array *array); -extern grpc_metadata_array_destroy_type grpc_metadata_array_destroy_import; -#define grpc_metadata_array_destroy grpc_metadata_array_destroy_import -typedef void(*grpc_call_details_init_type)(grpc_call_details *details); -extern grpc_call_details_init_type grpc_call_details_init_import; -#define grpc_call_details_init grpc_call_details_init_import -typedef void(*grpc_call_details_destroy_type)(grpc_call_details *details); -extern grpc_call_details_destroy_type grpc_call_details_destroy_import; -#define grpc_call_details_destroy grpc_call_details_destroy_import -typedef void(*grpc_register_plugin_type)(void (*init)(void), void (*destroy)(void)); -extern grpc_register_plugin_type grpc_register_plugin_import; -#define grpc_register_plugin grpc_register_plugin_import -typedef void(*grpc_init_type)(void); -extern grpc_init_type grpc_init_import; -#define grpc_init grpc_init_import -typedef void(*grpc_shutdown_type)(void); -extern grpc_shutdown_type grpc_shutdown_import; -#define grpc_shutdown grpc_shutdown_import -typedef const char *(*grpc_version_string_type)(void); -extern grpc_version_string_type grpc_version_string_import; -#define grpc_version_string grpc_version_string_import -typedef grpc_completion_queue *(*grpc_completion_queue_create_type)(void *reserved); -extern grpc_completion_queue_create_type grpc_completion_queue_create_import; -#define grpc_completion_queue_create grpc_completion_queue_create_import -typedef grpc_event(*grpc_completion_queue_next_type)(grpc_completion_queue *cq, gpr_timespec deadline, void *reserved); -extern grpc_completion_queue_next_type grpc_completion_queue_next_import; -#define grpc_completion_queue_next grpc_completion_queue_next_import -typedef grpc_event(*grpc_completion_queue_pluck_type)(grpc_completion_queue *cq, void *tag, gpr_timespec deadline, void *reserved); -extern grpc_completion_queue_pluck_type grpc_completion_queue_pluck_import; -#define grpc_completion_queue_pluck grpc_completion_queue_pluck_import -typedef void(*grpc_completion_queue_shutdown_type)(grpc_completion_queue *cq); -extern grpc_completion_queue_shutdown_type grpc_completion_queue_shutdown_import; -#define grpc_completion_queue_shutdown grpc_completion_queue_shutdown_import -typedef void(*grpc_completion_queue_destroy_type)(grpc_completion_queue *cq); -extern grpc_completion_queue_destroy_type grpc_completion_queue_destroy_import; -#define grpc_completion_queue_destroy grpc_completion_queue_destroy_import -typedef grpc_alarm *(*grpc_alarm_create_type)(grpc_completion_queue *cq, gpr_timespec deadline, void 
*tag); -extern grpc_alarm_create_type grpc_alarm_create_import; -#define grpc_alarm_create grpc_alarm_create_import -typedef void(*grpc_alarm_cancel_type)(grpc_alarm *alarm); -extern grpc_alarm_cancel_type grpc_alarm_cancel_import; -#define grpc_alarm_cancel grpc_alarm_cancel_import -typedef void(*grpc_alarm_destroy_type)(grpc_alarm *alarm); -extern grpc_alarm_destroy_type grpc_alarm_destroy_import; -#define grpc_alarm_destroy grpc_alarm_destroy_import -typedef grpc_connectivity_state(*grpc_channel_check_connectivity_state_type)(grpc_channel *channel, int try_to_connect); -extern grpc_channel_check_connectivity_state_type grpc_channel_check_connectivity_state_import; -#define grpc_channel_check_connectivity_state grpc_channel_check_connectivity_state_import -typedef void(*grpc_channel_watch_connectivity_state_type)(grpc_channel *channel, grpc_connectivity_state last_observed_state, gpr_timespec deadline, grpc_completion_queue *cq, void *tag); -extern grpc_channel_watch_connectivity_state_type grpc_channel_watch_connectivity_state_import; -#define grpc_channel_watch_connectivity_state grpc_channel_watch_connectivity_state_import -typedef grpc_call *(*grpc_channel_create_call_type)(grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *completion_queue, const char *method, const char *host, gpr_timespec deadline, void *reserved); -extern grpc_channel_create_call_type grpc_channel_create_call_import; -#define grpc_channel_create_call grpc_channel_create_call_import -typedef void(*grpc_channel_ping_type)(grpc_channel *channel, grpc_completion_queue *cq, void *tag, void *reserved); -extern grpc_channel_ping_type grpc_channel_ping_import; -#define grpc_channel_ping grpc_channel_ping_import -typedef void *(*grpc_channel_register_call_type)(grpc_channel *channel, const char *method, const char *host, void *reserved); -extern grpc_channel_register_call_type grpc_channel_register_call_import; -#define grpc_channel_register_call grpc_channel_register_call_import -typedef grpc_call *(*grpc_channel_create_registered_call_type)(grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *completion_queue, void *registered_call_handle, gpr_timespec deadline, void *reserved); -extern grpc_channel_create_registered_call_type grpc_channel_create_registered_call_import; -#define grpc_channel_create_registered_call grpc_channel_create_registered_call_import -typedef grpc_call_error(*grpc_call_start_batch_type)(grpc_call *call, const grpc_op *ops, size_t nops, void *tag, void *reserved); -extern grpc_call_start_batch_type grpc_call_start_batch_import; -#define grpc_call_start_batch grpc_call_start_batch_import -typedef char *(*grpc_call_get_peer_type)(grpc_call *call); -extern grpc_call_get_peer_type grpc_call_get_peer_import; -#define grpc_call_get_peer grpc_call_get_peer_import -typedef void(*grpc_census_call_set_context_type)(grpc_call *call, struct census_context *context); -extern grpc_census_call_set_context_type grpc_census_call_set_context_import; -#define grpc_census_call_set_context grpc_census_call_set_context_import -typedef struct census_context *(*grpc_census_call_get_context_type)(grpc_call *call); -extern grpc_census_call_get_context_type grpc_census_call_get_context_import; -#define grpc_census_call_get_context grpc_census_call_get_context_import -typedef char *(*grpc_channel_get_target_type)(grpc_channel *channel); -extern grpc_channel_get_target_type grpc_channel_get_target_import; -#define grpc_channel_get_target 
grpc_channel_get_target_import -typedef grpc_channel *(*grpc_insecure_channel_create_type)(const char *target, const grpc_channel_args *args, void *reserved); -extern grpc_insecure_channel_create_type grpc_insecure_channel_create_import; -#define grpc_insecure_channel_create grpc_insecure_channel_create_import -typedef grpc_channel *(*grpc_lame_client_channel_create_type)(const char *target, grpc_status_code error_code, const char *error_message); -extern grpc_lame_client_channel_create_type grpc_lame_client_channel_create_import; -#define grpc_lame_client_channel_create grpc_lame_client_channel_create_import -typedef void(*grpc_channel_destroy_type)(grpc_channel *channel); -extern grpc_channel_destroy_type grpc_channel_destroy_import; -#define grpc_channel_destroy grpc_channel_destroy_import -typedef grpc_call_error(*grpc_call_cancel_type)(grpc_call *call, void *reserved); -extern grpc_call_cancel_type grpc_call_cancel_import; -#define grpc_call_cancel grpc_call_cancel_import -typedef grpc_call_error(*grpc_call_cancel_with_status_type)(grpc_call *call, grpc_status_code status, const char *description, void *reserved); -extern grpc_call_cancel_with_status_type grpc_call_cancel_with_status_import; -#define grpc_call_cancel_with_status grpc_call_cancel_with_status_import -typedef void(*grpc_call_destroy_type)(grpc_call *call); -extern grpc_call_destroy_type grpc_call_destroy_import; -#define grpc_call_destroy grpc_call_destroy_import -typedef grpc_call_error(*grpc_server_request_call_type)(grpc_server *server, grpc_call **call, grpc_call_details *details, grpc_metadata_array *request_metadata, grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void *tag_new); -extern grpc_server_request_call_type grpc_server_request_call_import; -#define grpc_server_request_call grpc_server_request_call_import -typedef void *(*grpc_server_register_method_type)(grpc_server *server, const char *method, const char *host, grpc_server_register_method_payload_handling payload_handling, uint32_t flags); -extern grpc_server_register_method_type grpc_server_register_method_import; -#define grpc_server_register_method grpc_server_register_method_import -typedef grpc_call_error(*grpc_server_request_registered_call_type)(grpc_server *server, void *registered_method, grpc_call **call, gpr_timespec *deadline, grpc_metadata_array *request_metadata, grpc_byte_buffer **optional_payload, grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void *tag_new); -extern grpc_server_request_registered_call_type grpc_server_request_registered_call_import; -#define grpc_server_request_registered_call grpc_server_request_registered_call_import -typedef grpc_server *(*grpc_server_create_type)(const grpc_channel_args *args, void *reserved); -extern grpc_server_create_type grpc_server_create_import; -#define grpc_server_create grpc_server_create_import -typedef void(*grpc_server_register_completion_queue_type)(grpc_server *server, grpc_completion_queue *cq, void *reserved); -extern grpc_server_register_completion_queue_type grpc_server_register_completion_queue_import; -#define grpc_server_register_completion_queue grpc_server_register_completion_queue_import -typedef void(*grpc_server_register_non_listening_completion_queue_type)(grpc_server *server, grpc_completion_queue *q, void *reserved); -extern grpc_server_register_non_listening_completion_queue_type grpc_server_register_non_listening_completion_queue_import; -#define 
grpc_server_register_non_listening_completion_queue grpc_server_register_non_listening_completion_queue_import -typedef int(*grpc_server_add_insecure_http2_port_type)(grpc_server *server, const char *addr); -extern grpc_server_add_insecure_http2_port_type grpc_server_add_insecure_http2_port_import; -#define grpc_server_add_insecure_http2_port grpc_server_add_insecure_http2_port_import -typedef void(*grpc_server_start_type)(grpc_server *server); -extern grpc_server_start_type grpc_server_start_import; -#define grpc_server_start grpc_server_start_import -typedef void(*grpc_server_shutdown_and_notify_type)(grpc_server *server, grpc_completion_queue *cq, void *tag); -extern grpc_server_shutdown_and_notify_type grpc_server_shutdown_and_notify_import; -#define grpc_server_shutdown_and_notify grpc_server_shutdown_and_notify_import -typedef void(*grpc_server_cancel_all_calls_type)(grpc_server *server); -extern grpc_server_cancel_all_calls_type grpc_server_cancel_all_calls_import; -#define grpc_server_cancel_all_calls grpc_server_cancel_all_calls_import -typedef void(*grpc_server_destroy_type)(grpc_server *server); -extern grpc_server_destroy_type grpc_server_destroy_import; -#define grpc_server_destroy grpc_server_destroy_import -typedef int(*grpc_tracer_set_enabled_type)(const char *name, int enabled); -extern grpc_tracer_set_enabled_type grpc_tracer_set_enabled_import; -#define grpc_tracer_set_enabled grpc_tracer_set_enabled_import -typedef int(*grpc_header_key_is_legal_type)(const char *key, size_t length); -extern grpc_header_key_is_legal_type grpc_header_key_is_legal_import; -#define grpc_header_key_is_legal grpc_header_key_is_legal_import -typedef int(*grpc_header_nonbin_value_is_legal_type)(const char *value, size_t length); -extern grpc_header_nonbin_value_is_legal_type grpc_header_nonbin_value_is_legal_import; -#define grpc_header_nonbin_value_is_legal grpc_header_nonbin_value_is_legal_import -typedef int(*grpc_is_binary_header_type)(const char *key, size_t length); -extern grpc_is_binary_header_type grpc_is_binary_header_import; -#define grpc_is_binary_header grpc_is_binary_header_import -typedef const char *(*grpc_call_error_to_string_type)(grpc_call_error error); -extern grpc_call_error_to_string_type grpc_call_error_to_string_import; -#define grpc_call_error_to_string grpc_call_error_to_string_import -typedef grpc_channel *(*grpc_insecure_channel_create_from_fd_type)(const char *target, int fd, const grpc_channel_args *args); -extern grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import; -#define grpc_insecure_channel_create_from_fd grpc_insecure_channel_create_from_fd_import -typedef void(*grpc_server_add_insecure_channel_from_fd_type)(grpc_server *server, grpc_completion_queue *cq, int fd); -extern grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import; -#define grpc_server_add_insecure_channel_from_fd grpc_server_add_insecure_channel_from_fd_import -typedef void(*grpc_use_signal_type)(int signum); -extern grpc_use_signal_type grpc_use_signal_import; -#define grpc_use_signal grpc_use_signal_import -typedef const grpc_auth_property *(*grpc_auth_property_iterator_next_type)(grpc_auth_property_iterator *it); -extern grpc_auth_property_iterator_next_type grpc_auth_property_iterator_next_import; -#define grpc_auth_property_iterator_next grpc_auth_property_iterator_next_import -typedef grpc_auth_property_iterator(*grpc_auth_context_property_iterator_type)(const grpc_auth_context *ctx); -extern 
grpc_auth_context_property_iterator_type grpc_auth_context_property_iterator_import; -#define grpc_auth_context_property_iterator grpc_auth_context_property_iterator_import -typedef grpc_auth_property_iterator(*grpc_auth_context_peer_identity_type)(const grpc_auth_context *ctx); -extern grpc_auth_context_peer_identity_type grpc_auth_context_peer_identity_import; -#define grpc_auth_context_peer_identity grpc_auth_context_peer_identity_import -typedef grpc_auth_property_iterator(*grpc_auth_context_find_properties_by_name_type)(const grpc_auth_context *ctx, const char *name); -extern grpc_auth_context_find_properties_by_name_type grpc_auth_context_find_properties_by_name_import; -#define grpc_auth_context_find_properties_by_name grpc_auth_context_find_properties_by_name_import -typedef const char *(*grpc_auth_context_peer_identity_property_name_type)(const grpc_auth_context *ctx); -extern grpc_auth_context_peer_identity_property_name_type grpc_auth_context_peer_identity_property_name_import; -#define grpc_auth_context_peer_identity_property_name grpc_auth_context_peer_identity_property_name_import -typedef int(*grpc_auth_context_peer_is_authenticated_type)(const grpc_auth_context *ctx); -extern grpc_auth_context_peer_is_authenticated_type grpc_auth_context_peer_is_authenticated_import; -#define grpc_auth_context_peer_is_authenticated grpc_auth_context_peer_is_authenticated_import -typedef grpc_auth_context *(*grpc_call_auth_context_type)(grpc_call *call); -extern grpc_call_auth_context_type grpc_call_auth_context_import; -#define grpc_call_auth_context grpc_call_auth_context_import -typedef void(*grpc_auth_context_release_type)(grpc_auth_context *context); -extern grpc_auth_context_release_type grpc_auth_context_release_import; -#define grpc_auth_context_release grpc_auth_context_release_import -typedef void(*grpc_auth_context_add_property_type)(grpc_auth_context *ctx, const char *name, const char *value, size_t value_length); -extern grpc_auth_context_add_property_type grpc_auth_context_add_property_import; -#define grpc_auth_context_add_property grpc_auth_context_add_property_import -typedef void(*grpc_auth_context_add_cstring_property_type)(grpc_auth_context *ctx, const char *name, const char *value); -extern grpc_auth_context_add_cstring_property_type grpc_auth_context_add_cstring_property_import; -#define grpc_auth_context_add_cstring_property grpc_auth_context_add_cstring_property_import -typedef int(*grpc_auth_context_set_peer_identity_property_name_type)(grpc_auth_context *ctx, const char *name); -extern grpc_auth_context_set_peer_identity_property_name_type grpc_auth_context_set_peer_identity_property_name_import; -#define grpc_auth_context_set_peer_identity_property_name grpc_auth_context_set_peer_identity_property_name_import -typedef void(*grpc_channel_credentials_release_type)(grpc_channel_credentials *creds); -extern grpc_channel_credentials_release_type grpc_channel_credentials_release_import; -#define grpc_channel_credentials_release grpc_channel_credentials_release_import -typedef grpc_channel_credentials *(*grpc_google_default_credentials_create_type)(void); -extern grpc_google_default_credentials_create_type grpc_google_default_credentials_create_import; -#define grpc_google_default_credentials_create grpc_google_default_credentials_create_import -typedef void(*grpc_set_ssl_roots_override_callback_type)(grpc_ssl_roots_override_callback cb); -extern grpc_set_ssl_roots_override_callback_type grpc_set_ssl_roots_override_callback_import; -#define 
grpc_set_ssl_roots_override_callback grpc_set_ssl_roots_override_callback_import -typedef grpc_channel_credentials *(*grpc_ssl_credentials_create_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair, void *reserved); -extern grpc_ssl_credentials_create_type grpc_ssl_credentials_create_import; -#define grpc_ssl_credentials_create grpc_ssl_credentials_create_import -typedef void(*grpc_call_credentials_release_type)(grpc_call_credentials *creds); -extern grpc_call_credentials_release_type grpc_call_credentials_release_import; -#define grpc_call_credentials_release grpc_call_credentials_release_import -typedef grpc_channel_credentials *(*grpc_composite_channel_credentials_create_type)(grpc_channel_credentials *channel_creds, grpc_call_credentials *call_creds, void *reserved); -extern grpc_composite_channel_credentials_create_type grpc_composite_channel_credentials_create_import; -#define grpc_composite_channel_credentials_create grpc_composite_channel_credentials_create_import -typedef grpc_call_credentials *(*grpc_composite_call_credentials_create_type)(grpc_call_credentials *creds1, grpc_call_credentials *creds2, void *reserved); -extern grpc_composite_call_credentials_create_type grpc_composite_call_credentials_create_import; -#define grpc_composite_call_credentials_create grpc_composite_call_credentials_create_import -typedef grpc_call_credentials *(*grpc_google_compute_engine_credentials_create_type)(void *reserved); -extern grpc_google_compute_engine_credentials_create_type grpc_google_compute_engine_credentials_create_import; -#define grpc_google_compute_engine_credentials_create grpc_google_compute_engine_credentials_create_import -typedef gpr_timespec(*grpc_max_auth_token_lifetime_type)(); -extern grpc_max_auth_token_lifetime_type grpc_max_auth_token_lifetime_import; -#define grpc_max_auth_token_lifetime grpc_max_auth_token_lifetime_import -typedef grpc_call_credentials *(*grpc_service_account_jwt_access_credentials_create_type)(const char *json_key, gpr_timespec token_lifetime, void *reserved); -extern grpc_service_account_jwt_access_credentials_create_type grpc_service_account_jwt_access_credentials_create_import; -#define grpc_service_account_jwt_access_credentials_create grpc_service_account_jwt_access_credentials_create_import -typedef grpc_call_credentials *(*grpc_google_refresh_token_credentials_create_type)(const char *json_refresh_token, void *reserved); -extern grpc_google_refresh_token_credentials_create_type grpc_google_refresh_token_credentials_create_import; -#define grpc_google_refresh_token_credentials_create grpc_google_refresh_token_credentials_create_import -typedef grpc_call_credentials *(*grpc_access_token_credentials_create_type)(const char *access_token, void *reserved); -extern grpc_access_token_credentials_create_type grpc_access_token_credentials_create_import; -#define grpc_access_token_credentials_create grpc_access_token_credentials_create_import -typedef grpc_call_credentials *(*grpc_google_iam_credentials_create_type)(const char *authorization_token, const char *authority_selector, void *reserved); -extern grpc_google_iam_credentials_create_type grpc_google_iam_credentials_create_import; -#define grpc_google_iam_credentials_create grpc_google_iam_credentials_create_import -typedef grpc_call_credentials *(*grpc_metadata_credentials_create_from_plugin_type)(grpc_metadata_credentials_plugin plugin, void *reserved); -extern grpc_metadata_credentials_create_from_plugin_type grpc_metadata_credentials_create_from_plugin_import; 
-#define grpc_metadata_credentials_create_from_plugin grpc_metadata_credentials_create_from_plugin_import -typedef grpc_channel *(*grpc_secure_channel_create_type)(grpc_channel_credentials *creds, const char *target, const grpc_channel_args *args, void *reserved); -extern grpc_secure_channel_create_type grpc_secure_channel_create_import; -#define grpc_secure_channel_create grpc_secure_channel_create_import -typedef void(*grpc_server_credentials_release_type)(grpc_server_credentials *creds); -extern grpc_server_credentials_release_type grpc_server_credentials_release_import; -#define grpc_server_credentials_release grpc_server_credentials_release_import -typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs, int force_client_auth, void *reserved); -extern grpc_ssl_server_credentials_create_type grpc_ssl_server_credentials_create_import; -#define grpc_ssl_server_credentials_create grpc_ssl_server_credentials_create_import -typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_ex_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs, grpc_ssl_client_certificate_request_type client_certificate_request, void *reserved); -extern grpc_ssl_server_credentials_create_ex_type grpc_ssl_server_credentials_create_ex_import; -#define grpc_ssl_server_credentials_create_ex grpc_ssl_server_credentials_create_ex_import -typedef int(*grpc_server_add_secure_http2_port_type)(grpc_server *server, const char *addr, grpc_server_credentials *creds); -extern grpc_server_add_secure_http2_port_type grpc_server_add_secure_http2_port_import; -#define grpc_server_add_secure_http2_port grpc_server_add_secure_http2_port_import -typedef grpc_call_error(*grpc_call_set_credentials_type)(grpc_call *call, grpc_call_credentials *creds); -extern grpc_call_set_credentials_type grpc_call_set_credentials_import; -#define grpc_call_set_credentials grpc_call_set_credentials_import -typedef void(*grpc_server_credentials_set_auth_metadata_processor_type)(grpc_server_credentials *creds, grpc_auth_metadata_processor processor); -extern grpc_server_credentials_set_auth_metadata_processor_type grpc_server_credentials_set_auth_metadata_processor_import; -#define grpc_server_credentials_set_auth_metadata_processor grpc_server_credentials_set_auth_metadata_processor_import -typedef void *(*gpr_malloc_type)(size_t size); -extern gpr_malloc_type gpr_malloc_import; -#define gpr_malloc gpr_malloc_import -typedef void(*gpr_free_type)(void *ptr); -extern gpr_free_type gpr_free_import; -#define gpr_free gpr_free_import -typedef void *(*gpr_realloc_type)(void *p, size_t size); -extern gpr_realloc_type gpr_realloc_import; -#define gpr_realloc gpr_realloc_import -typedef void *(*gpr_malloc_aligned_type)(size_t size, size_t alignment_log); -extern gpr_malloc_aligned_type gpr_malloc_aligned_import; -#define gpr_malloc_aligned gpr_malloc_aligned_import -typedef void(*gpr_free_aligned_type)(void *ptr); -extern gpr_free_aligned_type gpr_free_aligned_import; -#define gpr_free_aligned gpr_free_aligned_import -typedef void(*gpr_set_allocation_functions_type)(gpr_allocation_functions functions); -extern gpr_set_allocation_functions_type gpr_set_allocation_functions_import; -#define gpr_set_allocation_functions gpr_set_allocation_functions_import -typedef gpr_allocation_functions(*gpr_get_allocation_functions_type)(); -extern gpr_get_allocation_functions_type 
gpr_get_allocation_functions_import; -#define gpr_get_allocation_functions gpr_get_allocation_functions_import -typedef grpc_byte_buffer *(*grpc_raw_byte_buffer_create_type)(gpr_slice *slices, size_t nslices); -extern grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import; -#define grpc_raw_byte_buffer_create grpc_raw_byte_buffer_create_import -typedef grpc_byte_buffer *(*grpc_raw_compressed_byte_buffer_create_type)(gpr_slice *slices, size_t nslices, grpc_compression_algorithm compression); -extern grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import; -#define grpc_raw_compressed_byte_buffer_create grpc_raw_compressed_byte_buffer_create_import -typedef grpc_byte_buffer *(*grpc_byte_buffer_copy_type)(grpc_byte_buffer *bb); -extern grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import; -#define grpc_byte_buffer_copy grpc_byte_buffer_copy_import -typedef size_t(*grpc_byte_buffer_length_type)(grpc_byte_buffer *bb); -extern grpc_byte_buffer_length_type grpc_byte_buffer_length_import; -#define grpc_byte_buffer_length grpc_byte_buffer_length_import -typedef void(*grpc_byte_buffer_destroy_type)(grpc_byte_buffer *byte_buffer); -extern grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import; -#define grpc_byte_buffer_destroy grpc_byte_buffer_destroy_import -typedef void(*grpc_byte_buffer_reader_init_type)(grpc_byte_buffer_reader *reader, grpc_byte_buffer *buffer); -extern grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import; -#define grpc_byte_buffer_reader_init grpc_byte_buffer_reader_init_import -typedef void(*grpc_byte_buffer_reader_destroy_type)(grpc_byte_buffer_reader *reader); -extern grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import; -#define grpc_byte_buffer_reader_destroy grpc_byte_buffer_reader_destroy_import -typedef int(*grpc_byte_buffer_reader_next_type)(grpc_byte_buffer_reader *reader, gpr_slice *slice); -extern grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import; -#define grpc_byte_buffer_reader_next grpc_byte_buffer_reader_next_import -typedef gpr_slice(*grpc_byte_buffer_reader_readall_type)(grpc_byte_buffer_reader *reader); -extern grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import; -#define grpc_byte_buffer_reader_readall grpc_byte_buffer_reader_readall_import -typedef grpc_byte_buffer *(*grpc_raw_byte_buffer_from_reader_type)(grpc_byte_buffer_reader *reader); -extern grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import; -#define grpc_raw_byte_buffer_from_reader grpc_raw_byte_buffer_from_reader_import -typedef void(*gpr_log_type)(const char *file, int line, gpr_log_severity severity, const char *format, ...) 
GPRC_PRINT_FORMAT_CHECK(4, 5); -extern gpr_log_type gpr_log_import; -#define gpr_log gpr_log_import -typedef void(*gpr_log_message_type)(const char *file, int line, gpr_log_severity severity, const char *message); -extern gpr_log_message_type gpr_log_message_import; -#define gpr_log_message gpr_log_message_import -typedef void(*gpr_set_log_verbosity_type)(gpr_log_severity min_severity_to_print); -extern gpr_set_log_verbosity_type gpr_set_log_verbosity_import; -#define gpr_set_log_verbosity gpr_set_log_verbosity_import -typedef void(*gpr_log_verbosity_init_type)(); -extern gpr_log_verbosity_init_type gpr_log_verbosity_init_import; -#define gpr_log_verbosity_init gpr_log_verbosity_init_import -typedef void(*gpr_set_log_function_type)(gpr_log_func func); -extern gpr_set_log_function_type gpr_set_log_function_import; -#define gpr_set_log_function gpr_set_log_function_import -typedef gpr_slice(*gpr_slice_ref_type)(gpr_slice s); -extern gpr_slice_ref_type gpr_slice_ref_import; -#define gpr_slice_ref gpr_slice_ref_import -typedef void(*gpr_slice_unref_type)(gpr_slice s); -extern gpr_slice_unref_type gpr_slice_unref_import; -#define gpr_slice_unref gpr_slice_unref_import -typedef gpr_slice(*gpr_slice_new_type)(void *p, size_t len, void (*destroy)(void *)); -extern gpr_slice_new_type gpr_slice_new_import; -#define gpr_slice_new gpr_slice_new_import -typedef gpr_slice(*gpr_slice_new_with_len_type)(void *p, size_t len, void (*destroy)(void *, size_t)); -extern gpr_slice_new_with_len_type gpr_slice_new_with_len_import; -#define gpr_slice_new_with_len gpr_slice_new_with_len_import -typedef gpr_slice(*gpr_slice_malloc_type)(size_t length); -extern gpr_slice_malloc_type gpr_slice_malloc_import; -#define gpr_slice_malloc gpr_slice_malloc_import -typedef gpr_slice(*gpr_slice_from_copied_string_type)(const char *source); -extern gpr_slice_from_copied_string_type gpr_slice_from_copied_string_import; -#define gpr_slice_from_copied_string gpr_slice_from_copied_string_import -typedef gpr_slice(*gpr_slice_from_copied_buffer_type)(const char *source, size_t len); -extern gpr_slice_from_copied_buffer_type gpr_slice_from_copied_buffer_import; -#define gpr_slice_from_copied_buffer gpr_slice_from_copied_buffer_import -typedef gpr_slice(*gpr_slice_from_static_string_type)(const char *source); -extern gpr_slice_from_static_string_type gpr_slice_from_static_string_import; -#define gpr_slice_from_static_string gpr_slice_from_static_string_import -typedef gpr_slice(*gpr_slice_sub_type)(gpr_slice s, size_t begin, size_t end); -extern gpr_slice_sub_type gpr_slice_sub_import; -#define gpr_slice_sub gpr_slice_sub_import -typedef gpr_slice(*gpr_slice_sub_no_ref_type)(gpr_slice s, size_t begin, size_t end); -extern gpr_slice_sub_no_ref_type gpr_slice_sub_no_ref_import; -#define gpr_slice_sub_no_ref gpr_slice_sub_no_ref_import -typedef gpr_slice(*gpr_slice_split_tail_type)(gpr_slice *s, size_t split); -extern gpr_slice_split_tail_type gpr_slice_split_tail_import; -#define gpr_slice_split_tail gpr_slice_split_tail_import -typedef gpr_slice(*gpr_slice_split_head_type)(gpr_slice *s, size_t split); -extern gpr_slice_split_head_type gpr_slice_split_head_import; -#define gpr_slice_split_head gpr_slice_split_head_import -typedef gpr_slice(*gpr_empty_slice_type)(void); -extern gpr_empty_slice_type gpr_empty_slice_import; -#define gpr_empty_slice gpr_empty_slice_import -typedef int(*gpr_slice_cmp_type)(gpr_slice a, gpr_slice b); -extern gpr_slice_cmp_type gpr_slice_cmp_import; -#define gpr_slice_cmp gpr_slice_cmp_import -typedef 
int(*gpr_slice_str_cmp_type)(gpr_slice a, const char *b); -extern gpr_slice_str_cmp_type gpr_slice_str_cmp_import; -#define gpr_slice_str_cmp gpr_slice_str_cmp_import -typedef void(*gpr_slice_buffer_init_type)(gpr_slice_buffer *sb); -extern gpr_slice_buffer_init_type gpr_slice_buffer_init_import; -#define gpr_slice_buffer_init gpr_slice_buffer_init_import -typedef void(*gpr_slice_buffer_destroy_type)(gpr_slice_buffer *sb); -extern gpr_slice_buffer_destroy_type gpr_slice_buffer_destroy_import; -#define gpr_slice_buffer_destroy gpr_slice_buffer_destroy_import -typedef void(*gpr_slice_buffer_add_type)(gpr_slice_buffer *sb, gpr_slice slice); -extern gpr_slice_buffer_add_type gpr_slice_buffer_add_import; -#define gpr_slice_buffer_add gpr_slice_buffer_add_import -typedef size_t(*gpr_slice_buffer_add_indexed_type)(gpr_slice_buffer *sb, gpr_slice slice); -extern gpr_slice_buffer_add_indexed_type gpr_slice_buffer_add_indexed_import; -#define gpr_slice_buffer_add_indexed gpr_slice_buffer_add_indexed_import -typedef void(*gpr_slice_buffer_addn_type)(gpr_slice_buffer *sb, gpr_slice *slices, size_t n); -extern gpr_slice_buffer_addn_type gpr_slice_buffer_addn_import; -#define gpr_slice_buffer_addn gpr_slice_buffer_addn_import -typedef uint8_t *(*gpr_slice_buffer_tiny_add_type)(gpr_slice_buffer *sb, size_t len); -extern gpr_slice_buffer_tiny_add_type gpr_slice_buffer_tiny_add_import; -#define gpr_slice_buffer_tiny_add gpr_slice_buffer_tiny_add_import -typedef void(*gpr_slice_buffer_pop_type)(gpr_slice_buffer *sb); -extern gpr_slice_buffer_pop_type gpr_slice_buffer_pop_import; -#define gpr_slice_buffer_pop gpr_slice_buffer_pop_import -typedef void(*gpr_slice_buffer_reset_and_unref_type)(gpr_slice_buffer *sb); -extern gpr_slice_buffer_reset_and_unref_type gpr_slice_buffer_reset_and_unref_import; -#define gpr_slice_buffer_reset_and_unref gpr_slice_buffer_reset_and_unref_import -typedef void(*gpr_slice_buffer_swap_type)(gpr_slice_buffer *a, gpr_slice_buffer *b); -extern gpr_slice_buffer_swap_type gpr_slice_buffer_swap_import; -#define gpr_slice_buffer_swap gpr_slice_buffer_swap_import -typedef void(*gpr_slice_buffer_move_into_type)(gpr_slice_buffer *src, gpr_slice_buffer *dst); -extern gpr_slice_buffer_move_into_type gpr_slice_buffer_move_into_import; -#define gpr_slice_buffer_move_into gpr_slice_buffer_move_into_import -typedef void(*gpr_slice_buffer_trim_end_type)(gpr_slice_buffer *src, size_t n, gpr_slice_buffer *garbage); -extern gpr_slice_buffer_trim_end_type gpr_slice_buffer_trim_end_import; -#define gpr_slice_buffer_trim_end gpr_slice_buffer_trim_end_import -typedef void(*gpr_slice_buffer_move_first_type)(gpr_slice_buffer *src, size_t n, gpr_slice_buffer *dst); -extern gpr_slice_buffer_move_first_type gpr_slice_buffer_move_first_import; -#define gpr_slice_buffer_move_first gpr_slice_buffer_move_first_import -typedef gpr_slice(*gpr_slice_buffer_take_first_type)(gpr_slice_buffer *src); -extern gpr_slice_buffer_take_first_type gpr_slice_buffer_take_first_import; -#define gpr_slice_buffer_take_first gpr_slice_buffer_take_first_import -typedef void(*gpr_mu_init_type)(gpr_mu *mu); -extern gpr_mu_init_type gpr_mu_init_import; -#define gpr_mu_init gpr_mu_init_import -typedef void(*gpr_mu_destroy_type)(gpr_mu *mu); -extern gpr_mu_destroy_type gpr_mu_destroy_import; -#define gpr_mu_destroy gpr_mu_destroy_import -typedef void(*gpr_mu_lock_type)(gpr_mu *mu); -extern gpr_mu_lock_type gpr_mu_lock_import; -#define gpr_mu_lock gpr_mu_lock_import -typedef void(*gpr_mu_unlock_type)(gpr_mu *mu); -extern 
gpr_mu_unlock_type gpr_mu_unlock_import; -#define gpr_mu_unlock gpr_mu_unlock_import -typedef int(*gpr_mu_trylock_type)(gpr_mu *mu); -extern gpr_mu_trylock_type gpr_mu_trylock_import; -#define gpr_mu_trylock gpr_mu_trylock_import -typedef void(*gpr_cv_init_type)(gpr_cv *cv); -extern gpr_cv_init_type gpr_cv_init_import; -#define gpr_cv_init gpr_cv_init_import -typedef void(*gpr_cv_destroy_type)(gpr_cv *cv); -extern gpr_cv_destroy_type gpr_cv_destroy_import; -#define gpr_cv_destroy gpr_cv_destroy_import -typedef int(*gpr_cv_wait_type)(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline); -extern gpr_cv_wait_type gpr_cv_wait_import; -#define gpr_cv_wait gpr_cv_wait_import -typedef void(*gpr_cv_signal_type)(gpr_cv *cv); -extern gpr_cv_signal_type gpr_cv_signal_import; -#define gpr_cv_signal gpr_cv_signal_import -typedef void(*gpr_cv_broadcast_type)(gpr_cv *cv); -extern gpr_cv_broadcast_type gpr_cv_broadcast_import; -#define gpr_cv_broadcast gpr_cv_broadcast_import -typedef void(*gpr_once_init_type)(gpr_once *once, void (*init_routine)(void)); -extern gpr_once_init_type gpr_once_init_import; -#define gpr_once_init gpr_once_init_import -typedef void(*gpr_event_init_type)(gpr_event *ev); -extern gpr_event_init_type gpr_event_init_import; -#define gpr_event_init gpr_event_init_import -typedef void(*gpr_event_set_type)(gpr_event *ev, void *value); -extern gpr_event_set_type gpr_event_set_import; -#define gpr_event_set gpr_event_set_import -typedef void *(*gpr_event_get_type)(gpr_event *ev); -extern gpr_event_get_type gpr_event_get_import; -#define gpr_event_get gpr_event_get_import -typedef void *(*gpr_event_wait_type)(gpr_event *ev, gpr_timespec abs_deadline); -extern gpr_event_wait_type gpr_event_wait_import; -#define gpr_event_wait gpr_event_wait_import -typedef void(*gpr_ref_init_type)(gpr_refcount *r, int n); -extern gpr_ref_init_type gpr_ref_init_import; -#define gpr_ref_init gpr_ref_init_import -typedef void(*gpr_ref_type)(gpr_refcount *r); -extern gpr_ref_type gpr_ref_import; -#define gpr_ref gpr_ref_import -typedef void(*gpr_ref_non_zero_type)(gpr_refcount *r); -extern gpr_ref_non_zero_type gpr_ref_non_zero_import; -#define gpr_ref_non_zero gpr_ref_non_zero_import -typedef void(*gpr_refn_type)(gpr_refcount *r, int n); -extern gpr_refn_type gpr_refn_import; -#define gpr_refn gpr_refn_import -typedef int(*gpr_unref_type)(gpr_refcount *r); -extern gpr_unref_type gpr_unref_import; -#define gpr_unref gpr_unref_import -typedef void(*gpr_stats_init_type)(gpr_stats_counter *c, intptr_t n); -extern gpr_stats_init_type gpr_stats_init_import; -#define gpr_stats_init gpr_stats_init_import -typedef void(*gpr_stats_inc_type)(gpr_stats_counter *c, intptr_t inc); -extern gpr_stats_inc_type gpr_stats_inc_import; -#define gpr_stats_inc gpr_stats_inc_import -typedef intptr_t(*gpr_stats_read_type)(const gpr_stats_counter *c); -extern gpr_stats_read_type gpr_stats_read_import; -#define gpr_stats_read gpr_stats_read_import -typedef gpr_timespec(*gpr_time_0_type)(gpr_clock_type type); -extern gpr_time_0_type gpr_time_0_import; -#define gpr_time_0 gpr_time_0_import -typedef gpr_timespec(*gpr_inf_future_type)(gpr_clock_type type); -extern gpr_inf_future_type gpr_inf_future_import; -#define gpr_inf_future gpr_inf_future_import -typedef gpr_timespec(*gpr_inf_past_type)(gpr_clock_type type); -extern gpr_inf_past_type gpr_inf_past_import; -#define gpr_inf_past gpr_inf_past_import -typedef void(*gpr_time_init_type)(void); -extern gpr_time_init_type gpr_time_init_import; -#define gpr_time_init gpr_time_init_import 
-typedef gpr_timespec(*gpr_now_type)(gpr_clock_type clock); -extern gpr_now_type gpr_now_import; -#define gpr_now gpr_now_import -typedef gpr_timespec(*gpr_convert_clock_type_type)(gpr_timespec t, gpr_clock_type target_clock); -extern gpr_convert_clock_type_type gpr_convert_clock_type_import; -#define gpr_convert_clock_type gpr_convert_clock_type_import -typedef int(*gpr_time_cmp_type)(gpr_timespec a, gpr_timespec b); -extern gpr_time_cmp_type gpr_time_cmp_import; -#define gpr_time_cmp gpr_time_cmp_import -typedef gpr_timespec(*gpr_time_max_type)(gpr_timespec a, gpr_timespec b); -extern gpr_time_max_type gpr_time_max_import; -#define gpr_time_max gpr_time_max_import -typedef gpr_timespec(*gpr_time_min_type)(gpr_timespec a, gpr_timespec b); -extern gpr_time_min_type gpr_time_min_import; -#define gpr_time_min gpr_time_min_import -typedef gpr_timespec(*gpr_time_add_type)(gpr_timespec a, gpr_timespec b); -extern gpr_time_add_type gpr_time_add_import; -#define gpr_time_add gpr_time_add_import -typedef gpr_timespec(*gpr_time_sub_type)(gpr_timespec a, gpr_timespec b); -extern gpr_time_sub_type gpr_time_sub_import; -#define gpr_time_sub gpr_time_sub_import -typedef gpr_timespec(*gpr_time_from_micros_type)(int64_t x, gpr_clock_type clock_type); -extern gpr_time_from_micros_type gpr_time_from_micros_import; -#define gpr_time_from_micros gpr_time_from_micros_import -typedef gpr_timespec(*gpr_time_from_nanos_type)(int64_t x, gpr_clock_type clock_type); -extern gpr_time_from_nanos_type gpr_time_from_nanos_import; -#define gpr_time_from_nanos gpr_time_from_nanos_import -typedef gpr_timespec(*gpr_time_from_millis_type)(int64_t x, gpr_clock_type clock_type); -extern gpr_time_from_millis_type gpr_time_from_millis_import; -#define gpr_time_from_millis gpr_time_from_millis_import -typedef gpr_timespec(*gpr_time_from_seconds_type)(int64_t x, gpr_clock_type clock_type); -extern gpr_time_from_seconds_type gpr_time_from_seconds_import; -#define gpr_time_from_seconds gpr_time_from_seconds_import -typedef gpr_timespec(*gpr_time_from_minutes_type)(int64_t x, gpr_clock_type clock_type); -extern gpr_time_from_minutes_type gpr_time_from_minutes_import; -#define gpr_time_from_minutes gpr_time_from_minutes_import -typedef gpr_timespec(*gpr_time_from_hours_type)(int64_t x, gpr_clock_type clock_type); -extern gpr_time_from_hours_type gpr_time_from_hours_import; -#define gpr_time_from_hours gpr_time_from_hours_import -typedef int32_t(*gpr_time_to_millis_type)(gpr_timespec timespec); -extern gpr_time_to_millis_type gpr_time_to_millis_import; -#define gpr_time_to_millis gpr_time_to_millis_import -typedef int(*gpr_time_similar_type)(gpr_timespec a, gpr_timespec b, gpr_timespec threshold); -extern gpr_time_similar_type gpr_time_similar_import; -#define gpr_time_similar gpr_time_similar_import -typedef void(*gpr_sleep_until_type)(gpr_timespec until); -extern gpr_sleep_until_type gpr_sleep_until_import; -#define gpr_sleep_until gpr_sleep_until_import -typedef double(*gpr_timespec_to_micros_type)(gpr_timespec t); -extern gpr_timespec_to_micros_type gpr_timespec_to_micros_import; -#define gpr_timespec_to_micros gpr_timespec_to_micros_import -typedef gpr_avl(*gpr_avl_create_type)(const gpr_avl_vtable *vtable); -extern gpr_avl_create_type gpr_avl_create_import; -#define gpr_avl_create gpr_avl_create_import -typedef gpr_avl(*gpr_avl_ref_type)(gpr_avl avl); -extern gpr_avl_ref_type gpr_avl_ref_import; -#define gpr_avl_ref gpr_avl_ref_import -typedef void(*gpr_avl_unref_type)(gpr_avl avl); -extern gpr_avl_unref_type 
gpr_avl_unref_import; -#define gpr_avl_unref gpr_avl_unref_import -typedef gpr_avl(*gpr_avl_add_type)(gpr_avl avl, void *key, void *value); -extern gpr_avl_add_type gpr_avl_add_import; -#define gpr_avl_add gpr_avl_add_import -typedef gpr_avl(*gpr_avl_remove_type)(gpr_avl avl, void *key); -extern gpr_avl_remove_type gpr_avl_remove_import; -#define gpr_avl_remove gpr_avl_remove_import -typedef void *(*gpr_avl_get_type)(gpr_avl avl, void *key); -extern gpr_avl_get_type gpr_avl_get_import; -#define gpr_avl_get gpr_avl_get_import -typedef int(*gpr_avl_maybe_get_type)(gpr_avl avl, void *key, void **value); -extern gpr_avl_maybe_get_type gpr_avl_maybe_get_import; -#define gpr_avl_maybe_get gpr_avl_maybe_get_import -typedef int(*gpr_avl_is_empty_type)(gpr_avl avl); -extern gpr_avl_is_empty_type gpr_avl_is_empty_import; -#define gpr_avl_is_empty gpr_avl_is_empty_import -typedef gpr_cmdline *(*gpr_cmdline_create_type)(const char *description); -extern gpr_cmdline_create_type gpr_cmdline_create_import; -#define gpr_cmdline_create gpr_cmdline_create_import -typedef void(*gpr_cmdline_add_int_type)(gpr_cmdline *cl, const char *name, const char *help, int *value); -extern gpr_cmdline_add_int_type gpr_cmdline_add_int_import; -#define gpr_cmdline_add_int gpr_cmdline_add_int_import -typedef void(*gpr_cmdline_add_flag_type)(gpr_cmdline *cl, const char *name, const char *help, int *value); -extern gpr_cmdline_add_flag_type gpr_cmdline_add_flag_import; -#define gpr_cmdline_add_flag gpr_cmdline_add_flag_import -typedef void(*gpr_cmdline_add_string_type)(gpr_cmdline *cl, const char *name, const char *help, char **value); -extern gpr_cmdline_add_string_type gpr_cmdline_add_string_import; -#define gpr_cmdline_add_string gpr_cmdline_add_string_import -typedef void(*gpr_cmdline_on_extra_arg_type)(gpr_cmdline *cl, const char *name, const char *help, void (*on_extra_arg)(void *user_data, const char *arg), void *user_data); -extern gpr_cmdline_on_extra_arg_type gpr_cmdline_on_extra_arg_import; -#define gpr_cmdline_on_extra_arg gpr_cmdline_on_extra_arg_import -typedef void(*gpr_cmdline_set_survive_failure_type)(gpr_cmdline *cl); -extern gpr_cmdline_set_survive_failure_type gpr_cmdline_set_survive_failure_import; -#define gpr_cmdline_set_survive_failure gpr_cmdline_set_survive_failure_import -typedef int(*gpr_cmdline_parse_type)(gpr_cmdline *cl, int argc, char **argv); -extern gpr_cmdline_parse_type gpr_cmdline_parse_import; -#define gpr_cmdline_parse gpr_cmdline_parse_import -typedef void(*gpr_cmdline_destroy_type)(gpr_cmdline *cl); -extern gpr_cmdline_destroy_type gpr_cmdline_destroy_import; -#define gpr_cmdline_destroy gpr_cmdline_destroy_import -typedef char *(*gpr_cmdline_usage_string_type)(gpr_cmdline *cl, const char *argv0); -extern gpr_cmdline_usage_string_type gpr_cmdline_usage_string_import; -#define gpr_cmdline_usage_string gpr_cmdline_usage_string_import -typedef unsigned(*gpr_cpu_num_cores_type)(void); -extern gpr_cpu_num_cores_type gpr_cpu_num_cores_import; -#define gpr_cpu_num_cores gpr_cpu_num_cores_import -typedef unsigned(*gpr_cpu_current_cpu_type)(void); -extern gpr_cpu_current_cpu_type gpr_cpu_current_cpu_import; -#define gpr_cpu_current_cpu gpr_cpu_current_cpu_import -typedef gpr_histogram *(*gpr_histogram_create_type)(double resolution, double max_bucket_start); -extern gpr_histogram_create_type gpr_histogram_create_import; -#define gpr_histogram_create gpr_histogram_create_import -typedef void(*gpr_histogram_destroy_type)(gpr_histogram *h); -extern gpr_histogram_destroy_type 
gpr_histogram_destroy_import; -#define gpr_histogram_destroy gpr_histogram_destroy_import -typedef void(*gpr_histogram_add_type)(gpr_histogram *h, double x); -extern gpr_histogram_add_type gpr_histogram_add_import; -#define gpr_histogram_add gpr_histogram_add_import -typedef int(*gpr_histogram_merge_type)(gpr_histogram *dst, const gpr_histogram *src); -extern gpr_histogram_merge_type gpr_histogram_merge_import; -#define gpr_histogram_merge gpr_histogram_merge_import -typedef double(*gpr_histogram_percentile_type)(gpr_histogram *histogram, double percentile); -extern gpr_histogram_percentile_type gpr_histogram_percentile_import; -#define gpr_histogram_percentile gpr_histogram_percentile_import -typedef double(*gpr_histogram_mean_type)(gpr_histogram *histogram); -extern gpr_histogram_mean_type gpr_histogram_mean_import; -#define gpr_histogram_mean gpr_histogram_mean_import -typedef double(*gpr_histogram_stddev_type)(gpr_histogram *histogram); -extern gpr_histogram_stddev_type gpr_histogram_stddev_import; -#define gpr_histogram_stddev gpr_histogram_stddev_import -typedef double(*gpr_histogram_variance_type)(gpr_histogram *histogram); -extern gpr_histogram_variance_type gpr_histogram_variance_import; -#define gpr_histogram_variance gpr_histogram_variance_import -typedef double(*gpr_histogram_maximum_type)(gpr_histogram *histogram); -extern gpr_histogram_maximum_type gpr_histogram_maximum_import; -#define gpr_histogram_maximum gpr_histogram_maximum_import -typedef double(*gpr_histogram_minimum_type)(gpr_histogram *histogram); -extern gpr_histogram_minimum_type gpr_histogram_minimum_import; -#define gpr_histogram_minimum gpr_histogram_minimum_import -typedef double(*gpr_histogram_count_type)(gpr_histogram *histogram); -extern gpr_histogram_count_type gpr_histogram_count_import; -#define gpr_histogram_count gpr_histogram_count_import -typedef double(*gpr_histogram_sum_type)(gpr_histogram *histogram); -extern gpr_histogram_sum_type gpr_histogram_sum_import; -#define gpr_histogram_sum gpr_histogram_sum_import -typedef double(*gpr_histogram_sum_of_squares_type)(gpr_histogram *histogram); -extern gpr_histogram_sum_of_squares_type gpr_histogram_sum_of_squares_import; -#define gpr_histogram_sum_of_squares gpr_histogram_sum_of_squares_import -typedef const uint32_t *(*gpr_histogram_get_contents_type)(gpr_histogram *histogram, size_t *count); -extern gpr_histogram_get_contents_type gpr_histogram_get_contents_import; -#define gpr_histogram_get_contents gpr_histogram_get_contents_import -typedef void(*gpr_histogram_merge_contents_type)(gpr_histogram *histogram, const uint32_t *data, size_t data_count, double min_seen, double max_seen, double sum, double sum_of_squares, double count); -extern gpr_histogram_merge_contents_type gpr_histogram_merge_contents_import; -#define gpr_histogram_merge_contents gpr_histogram_merge_contents_import -typedef int(*gpr_join_host_port_type)(char **out, const char *host, int port); -extern gpr_join_host_port_type gpr_join_host_port_import; -#define gpr_join_host_port gpr_join_host_port_import -typedef int(*gpr_split_host_port_type)(const char *name, char **host, char **port); -extern gpr_split_host_port_type gpr_split_host_port_import; -#define gpr_split_host_port gpr_split_host_port_import -typedef char *(*gpr_format_message_type)(int messageid); -extern gpr_format_message_type gpr_format_message_import; -#define gpr_format_message gpr_format_message_import -typedef char *(*gpr_strdup_type)(const char *src); -extern gpr_strdup_type gpr_strdup_import; -#define gpr_strdup 
gpr_strdup_import -typedef int(*gpr_asprintf_type)(char **strp, const char *format, ...) GPRC_PRINT_FORMAT_CHECK(2, 3); -extern gpr_asprintf_type gpr_asprintf_import; -#define gpr_asprintf gpr_asprintf_import -typedef const char *(*gpr_subprocess_binary_extension_type)(); -extern gpr_subprocess_binary_extension_type gpr_subprocess_binary_extension_import; -#define gpr_subprocess_binary_extension gpr_subprocess_binary_extension_import -typedef gpr_subprocess *(*gpr_subprocess_create_type)(int argc, const char **argv); -extern gpr_subprocess_create_type gpr_subprocess_create_import; -#define gpr_subprocess_create gpr_subprocess_create_import -typedef void(*gpr_subprocess_destroy_type)(gpr_subprocess *p); -extern gpr_subprocess_destroy_type gpr_subprocess_destroy_import; -#define gpr_subprocess_destroy gpr_subprocess_destroy_import -typedef int(*gpr_subprocess_join_type)(gpr_subprocess *p); -extern gpr_subprocess_join_type gpr_subprocess_join_import; -#define gpr_subprocess_join gpr_subprocess_join_import -typedef void(*gpr_subprocess_interrupt_type)(gpr_subprocess *p); -extern gpr_subprocess_interrupt_type gpr_subprocess_interrupt_import; -#define gpr_subprocess_interrupt gpr_subprocess_interrupt_import -typedef int(*gpr_thd_new_type)(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg, const gpr_thd_options *options); -extern gpr_thd_new_type gpr_thd_new_import; -#define gpr_thd_new gpr_thd_new_import -typedef gpr_thd_options(*gpr_thd_options_default_type)(void); -extern gpr_thd_options_default_type gpr_thd_options_default_import; -#define gpr_thd_options_default gpr_thd_options_default_import -typedef void(*gpr_thd_options_set_detached_type)(gpr_thd_options *options); -extern gpr_thd_options_set_detached_type gpr_thd_options_set_detached_import; -#define gpr_thd_options_set_detached gpr_thd_options_set_detached_import -typedef void(*gpr_thd_options_set_joinable_type)(gpr_thd_options *options); -extern gpr_thd_options_set_joinable_type gpr_thd_options_set_joinable_import; -#define gpr_thd_options_set_joinable gpr_thd_options_set_joinable_import -typedef int(*gpr_thd_options_is_detached_type)(const gpr_thd_options *options); -extern gpr_thd_options_is_detached_type gpr_thd_options_is_detached_import; -#define gpr_thd_options_is_detached gpr_thd_options_is_detached_import -typedef int(*gpr_thd_options_is_joinable_type)(const gpr_thd_options *options); -extern gpr_thd_options_is_joinable_type gpr_thd_options_is_joinable_import; -#define gpr_thd_options_is_joinable gpr_thd_options_is_joinable_import -typedef gpr_thd_id(*gpr_thd_currentid_type)(void); -extern gpr_thd_currentid_type gpr_thd_currentid_import; -#define gpr_thd_currentid gpr_thd_currentid_import -typedef void(*gpr_thd_join_type)(gpr_thd_id t); -extern gpr_thd_join_type gpr_thd_join_import; -#define gpr_thd_join gpr_thd_join_import - -#ifdef __cplusplus -extern "C" { -#endif /* __cpluslus */ - -void pygrpc_load_imports(HMODULE library); - -#ifdef __cplusplus -} -#endif /* __cpluslus */ - -#else /* !GPR_WINDOWS */ - #include <grpc/byte_buffer.h> #include <grpc/byte_buffer_reader.h> #include <grpc/compression.h> @@ -895,6 +47,4 @@ void pygrpc_load_imports(HMODULE library); #include <grpc/support/time.h> #include <grpc/status.h> -#endif /* !GPR_WINDOWS */ - #endif diff --git a/src/python/grpcio/grpc/_cython/loader.c b/src/python/grpcio/grpc/_cython/loader.c index 86b70dbb02..34bd897549 100644 --- a/src/python/grpcio/grpc/_cython/loader.c +++ b/src/python/grpcio/grpc/_cython/loader.c @@ -38,31 +38,8 @@ extern "C" { #endif /* __cpluslus 
*/ -#if GPR_WINDOWS - -int pygrpc_load_core(char *path) { - HMODULE grpc_c; -#ifdef GPR_ARCH_32 - /* Close your eyes for a moment, it'll all be over soon. */ - char *six = strrchr(path, '6'); - *six++ = '3'; - *six = '2'; -#endif - grpc_c = LoadLibraryA(path); - if (grpc_c) { - pygrpc_load_imports(grpc_c); - return 1; - } - - return 0; -} - -#else - int pygrpc_load_core(char *path) { return 1; } -#endif /* !GPR_WINDOWS */ - // Cython doesn't have Py_AtExit bindings, so we call the C_API directly int pygrpc_initialize_core(void) { grpc_init(); diff --git a/src/python/grpcio/grpc/_cython/loader.h b/src/python/grpcio/grpc/_cython/loader.h index eb4b1a1b01..62fd225204 100644 --- a/src/python/grpcio/grpc/_cython/loader.h +++ b/src/python/grpcio/grpc/_cython/loader.h @@ -39,6 +39,8 @@ /* Additional inclusions not covered by "imports.generated.h" */ #include <grpc/byte_buffer_reader.h> +/* TODO(atash) remove cruft */ + #ifdef __cplusplus extern "C" { #endif /* __cpluslus */ diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py index f4c114056f..94a13bfb2f 100644 --- a/src/python/grpcio/grpc/_server.py +++ b/src/python/grpcio/grpc/_server.py @@ -157,7 +157,7 @@ def _abort(state, call, code, details): effective_details, _EMPTY_FLAGS), ) token = _SEND_STATUS_FROM_SERVER_TOKEN - call.start_batch( + call.start_server_batch( cygrpc.Operations(operations), _send_status_from_server(state, token)) state.statused = True @@ -257,7 +257,7 @@ class _Context(grpc.ServicerContext): if self._state.initial_metadata_allowed: operation = cygrpc.operation_send_initial_metadata( _common.cygrpc_metadata(initial_metadata), _EMPTY_FLAGS) - self._rpc_event.operation_call.start_batch( + self._rpc_event.operation_call.start_server_batch( cygrpc.Operations((operation,)), _send_initial_metadata(self._state)) self._state.initial_metadata_allowed = False @@ -292,7 +292,7 @@ class _RequestIterator(object): elif self._state.client is _CLOSED or self._state.statused: raise StopIteration() else: - self._call.start_batch( + self._call.start_server_batch( cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)), _receive_message(self._state, self._call, self._request_deserializer)) self._state.due.add(_RECEIVE_MESSAGE_TOKEN) @@ -333,7 +333,7 @@ def _unary_request(rpc_event, state, request_deserializer): if state.client is _CANCELLED or state.statused: return None else: - start_batch_result = rpc_event.operation_call.start_batch( + start_server_batch_result = rpc_event.operation_call.start_server_batch( cygrpc.Operations( (cygrpc.operation_receive_message(_EMPTY_FLAGS),)), _receive_message( @@ -417,7 +417,7 @@ def _send_response(rpc_event, state, serialized_response): cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS), ) token = _SEND_MESSAGE_TOKEN - rpc_event.operation_call.start_batch( + rpc_event.operation_call.start_server_batch( cygrpc.Operations(operations), _send_message(state, token)) state.due.add(token) while True: @@ -443,7 +443,7 @@ def _status(rpc_event, state, serialized_response): if serialized_response is not None: operations.append(cygrpc.operation_send_message( serialized_response, _EMPTY_FLAGS)) - rpc_event.operation_call.start_batch( + rpc_event.operation_call.start_server_batch( cygrpc.Operations(operations), _send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN)) state.statused = True @@ -550,7 +550,7 @@ def _handle_unrecognized_method(rpc_event): b'Method not found!', _EMPTY_FLAGS), ) rpc_state = _RPCState() - rpc_event.operation_call.start_batch( 
+ rpc_event.operation_call.start_server_batch( operations, lambda ignored_event: (rpc_state, (),)) return rpc_state @@ -558,7 +558,7 @@ def _handle_unrecognized_method(rpc_event): def _handle_with_method_handler(rpc_event, method_handler, thread_pool): state = _RPCState() with state.condition: - rpc_event.operation_call.start_batch( + rpc_event.operation_call.start_server_batch( cygrpc.Operations( (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)), _receive_close_on_server(state)) @@ -731,7 +731,7 @@ def _start(state): class Server(grpc.Server): - def __init__(self, generic_handlers, thread_pool): + def __init__(self, thread_pool, generic_handlers): completion_queue = cygrpc.CompletionQueue() server = cygrpc.Server() server.register_completion_queue(completion_queue) diff --git a/src/python/grpcio/grpc/beta/_client_adaptations.py b/src/python/grpcio/grpc/beta/_client_adaptations.py index 56456cc117..73415e0be7 100644 --- a/src/python/grpcio/grpc/beta/_client_adaptations.py +++ b/src/python/grpcio/grpc/beta/_client_adaptations.py @@ -117,7 +117,10 @@ class _Rendezvous(future.Future, face.Call): def exception(self, timeout=None): try: rpc_error_call = self._future.exception(timeout=timeout) - return _abortion_error(rpc_error_call) + if rpc_error_call is None: + return None + else: + return _abortion_error(rpc_error_call) except grpc.FutureTimeoutError: raise future.TimeoutError() except grpc.FutureCancelledError: diff --git a/src/python/grpcio/grpc/beta/_server_adaptations.py b/src/python/grpcio/grpc/beta/_server_adaptations.py index 1e1f80156a..cca4a1797a 100644 --- a/src/python/grpcio/grpc/beta/_server_adaptations.py +++ b/src/python/grpcio/grpc/beta/_server_adaptations.py @@ -371,4 +371,5 @@ def server( _DEFAULT_POOL_SIZE if thread_pool_size is None else thread_pool_size) else: effective_thread_pool = thread_pool - return _Server(grpc.server((generic_rpc_handler,), effective_thread_pool)) + return _Server( + grpc.server(effective_thread_pool, handlers=(generic_rpc_handler,))) diff --git a/src/python/grpcio/grpc_version.py b/src/python/grpcio/grpc_version.py index 0f4db9d972..ea38526a28 100644 --- a/src/python/grpcio/grpc_version.py +++ b/src/python/grpcio/grpc_version.py @@ -29,4 +29,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_version.py.template`!!! -VERSION='0.16.0.dev0' +VERSION='1.1.0.dev0' diff --git a/src/python/grpcio/support.py b/src/python/grpcio/support.py index 33244eb388..7730374df0 100644 --- a/src/python/grpcio/support.py +++ b/src/python/grpcio/support.py @@ -50,7 +50,6 @@ Could not find <Python.h>. This could mean the following: (check your environment variables or try re-installing?) * You're on Windows and your Python installation was somehow corrupted (check your environment variables or try re-installing?) - * Note: Windows users should look into installing `vcpython27`. """ C_CHECKS = { diff --git a/src/python/grpcio_tests/grpc_version.py b/src/python/grpcio_tests/grpc_version.py index 7aa600728a..90f68a5741 100644 --- a/src/python/grpcio_tests/grpc_version.py +++ b/src/python/grpcio_tests/grpc_version.py @@ -29,4 +29,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_tests/grpc_version.py.template`!!! 
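The server-side hunks above switch to the new grpc.server construction pattern: the thread pool is the first positional argument and generic handlers are passed through the handlers keyword. A minimal sketch of that calling convention (the pool size and the empty handler tuple are illustrative values, not taken from this patch):

    from concurrent import futures
    import grpc

    # New argument order: thread pool first, handlers as a keyword argument.
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=10), handlers=())
    port = server.add_insecure_port('[::]:0')  # ask for any free port
    server.start()
    # ... drive RPCs against 'localhost:%d' % port ...
    server.stop(0)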
-VERSION='0.16.0.dev0' +VERSION='1.1.0.dev0' diff --git a/src/python/grpcio_tests/tests/_runner.py b/src/python/grpcio_tests/tests/_runner.py index f0718573e2..926dcbe23a 100644 --- a/src/python/grpcio_tests/tests/_runner.py +++ b/src/python/grpcio_tests/tests/_runner.py @@ -30,7 +30,6 @@ from __future__ import absolute_import import collections -import fcntl import multiprocessing import os import select @@ -178,15 +177,20 @@ class Runner(object): stderr_pipe.write_bypass( '\ninterrupted stderr:\n{}\n'.format(stderr_pipe.output().decode())) os._exit(1) - signal.signal(signal.SIGINT, sigint_handler) - signal.signal(signal.SIGSEGV, fault_handler) - signal.signal(signal.SIGBUS, fault_handler) - signal.signal(signal.SIGABRT, fault_handler) - signal.signal(signal.SIGFPE, fault_handler) - signal.signal(signal.SIGILL, fault_handler) + def try_set_handler(name, handler): + try: + signal.signal(getattr(signal, name), handler) + except AttributeError: + pass + try_set_handler('SIGINT', sigint_handler) + try_set_handler('SIGSEGV', fault_handler) + try_set_handler('SIGBUS', fault_handler) + try_set_handler('SIGABRT', fault_handler) + try_set_handler('SIGFPE', fault_handler) + try_set_handler('SIGILL', fault_handler) # Sometimes output will lag after a test has successfully finished; we # ignore such writes to our pipes. - signal.signal(signal.SIGPIPE, signal.SIG_IGN) + try_set_handler('SIGPIPE', signal.SIG_IGN) # Run the tests result.startTestRun() diff --git a/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py b/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py index 91519b6fba..c753d6faf0 100644 --- a/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py +++ b/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py @@ -48,7 +48,7 @@ class InsecureInteropTest( port = self.server.add_insecure_port('[::]:0') self.server.start() self.stub = test_pb2.beta_create_TestService_stub( - implementations.insecure_channel('[::]', port)) + implementations.insecure_channel('localhost', port)) def tearDown(self): self.server.stop(0) diff --git a/src/python/grpcio_tests/tests/interop/_secure_interop_test.py b/src/python/grpcio_tests/tests/interop/_secure_interop_test.py index c61547b977..cb09f54a34 100644 --- a/src/python/grpcio_tests/tests/interop/_secure_interop_test.py +++ b/src/python/grpcio_tests/tests/interop/_secure_interop_test.py @@ -55,7 +55,7 @@ class SecureInteropTest( self.server.start() self.stub = test_pb2.beta_create_TestService_stub( test_utilities.not_really_secure_channel( - '[::]', port, implementations.ssl_channel_credentials( + 'localhost', port, implementations.ssl_channel_credentials( resources.test_root_certificates()), _SERVER_HOST_OVERRIDE)) diff --git a/src/python/grpcio_tests/tests/interop/methods.py b/src/python/grpcio_tests/tests/interop/methods.py index 86aa0495a2..97e6c9e27e 100644 --- a/src/python/grpcio_tests/tests/interop/methods.py +++ b/src/python/grpcio_tests/tests/interop/methods.py @@ -39,6 +39,7 @@ import time from oauth2client import client as oauth2client_client +import grpc from grpc.beta import implementations from grpc.beta import interfaces from grpc.framework.common import cardinality @@ -57,12 +58,18 @@ class TestService(test_pb2.BetaTestServiceServicer): return empty_pb2.Empty() def UnaryCall(self, request, context): + if request.HasField('response_status'): + context.code(request.response_status.code) + context.details(request.response_status.message) return messages_pb2.SimpleResponse( payload=messages_pb2.Payload( 
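The _runner.py hunk above replaces unconditional signal.signal calls with a guarded helper so the test runner also works on platforms that do not expose every POSIX signal (for example SIGBUS and SIGPIPE are absent on Windows). A self-contained sketch of the same guard; the fault handler body is purely illustrative:

    import signal

    def try_set_handler(name, handler):
        # Not every platform exposes every signal; silently skip missing ones.
        try:
            signal.signal(getattr(signal, name), handler)
        except AttributeError:
            pass

    def fault_handler(signum, frame):  # illustrative handler body
        raise SystemExit(1)

    for sig in ('SIGSEGV', 'SIGBUS', 'SIGABRT', 'SIGFPE', 'SIGILL'):
        try_set_handler(sig, fault_handler)
    try_set_handler('SIGPIPE', signal.SIG_IGN)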
type=messages_pb2.COMPRESSABLE, body=b'\x00' * request.response_size)) def StreamingOutputCall(self, request, context): + if request.HasField('response_status'): + context.code(request.response_status.code) + context.details(request.response_status.message) for response_parameters in request.response_parameters: yield messages_pb2.StreamingOutputCallResponse( payload=messages_pb2.Payload( @@ -79,6 +86,9 @@ class TestService(test_pb2.BetaTestServiceServicer): def FullDuplexCall(self, request_iterator, context): for request in request_iterator: + if request.HasField('response_status'): + context.code(request.response_status.code) + context.details(request.response_status.message) for response_parameters in request.response_parameters: yield messages_pb2.StreamingOutputCallResponse( payload=messages_pb2.Payload( @@ -289,6 +299,39 @@ def _empty_stream(stub): pass +def _status_code_and_message(stub): + with stub: + message = 'test status message' + code = 2 + status = grpc.StatusCode.UNKNOWN # code = 2 + request = messages_pb2.SimpleRequest( + response_type=messages_pb2.COMPRESSABLE, + response_size=1, + payload=messages_pb2.Payload(body=b'\x00'), + response_status=messages_pb2.EchoStatus(code=code, message=message) + ) + response_future = stub.UnaryCall.future(request, _TIMEOUT) + if response_future.code() != status: + raise ValueError( + 'expected code %s, got %s' % (status, response_future.code())) + if response_future.details() != message: + raise ValueError( + 'expected message %s, got %s' % (message, response_future.details())) + + request = messages_pb2.StreamingOutputCallRequest( + response_type=messages_pb2.COMPRESSABLE, + response_parameters=( + messages_pb2.ResponseParameters(size=1),), + response_status=messages_pb2.EchoStatus(code=code, message=message)) + response_iterator = stub.StreamingOutputCall(request, _TIMEOUT) + if response_future.code() != status: + raise ValueError( + 'expected code %s, got %s' % (status, response_iterator.code())) + if response_future.details() != message: + raise ValueError( + 'expected message %s, got %s' % (message, response_iterator.details())) + + def _compute_engine_creds(stub, args): response = _large_unary_common_behavior(stub, True, True) if args.default_service_account != response.username: @@ -347,6 +390,7 @@ class TestCase(enum.Enum): CANCEL_AFTER_BEGIN = 'cancel_after_begin' CANCEL_AFTER_FIRST_RESPONSE = 'cancel_after_first_response' EMPTY_STREAM = 'empty_stream' + STATUS_CODE_AND_MESSAGE = 'status_code_and_message' COMPUTE_ENGINE_CREDS = 'compute_engine_creds' OAUTH2_AUTH_TOKEN = 'oauth2_auth_token' JWT_TOKEN_CREDS = 'jwt_token_creds' @@ -372,6 +416,8 @@ class TestCase(enum.Enum): _timeout_on_sleeping_server(stub) elif self is TestCase.EMPTY_STREAM: _empty_stream(stub) + elif self is TestCase.STATUS_CODE_AND_MESSAGE: + _status_code_and_message(stub) elif self is TestCase.COMPUTE_ENGINE_CREDS: _compute_engine_creds(stub, args) elif self is TestCase.OAUTH2_AUTH_TOKEN: diff --git a/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py index bf09380c85..7ca2bcff38 100644 --- a/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py +++ b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py @@ -171,7 +171,7 @@ def _CreateService(): return servicer_methods.HalfDuplexCall(request_iter, context) server = grpc.server( - (), futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE)) + 
futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE)) getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server) port = server.add_insecure_port('[::]:0') server.start() @@ -192,7 +192,7 @@ def _CreateIncompleteService(): pass server = grpc.server( - (), futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE)) + futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE)) getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server) port = server.add_insecure_port('[::]:0') server.start() diff --git a/src/python/grpcio_tests/tests/qps/benchmark_client.py b/src/python/grpcio_tests/tests/qps/benchmark_client.py index 080281415d..83b46c914e 100644 --- a/src/python/grpcio_tests/tests/qps/benchmark_client.py +++ b/src/python/grpcio_tests/tests/qps/benchmark_client.py @@ -37,16 +37,23 @@ from concurrent import futures from six.moves import queue import grpc -from grpc.beta import implementations -from grpc.framework.interfaces.face import face from src.proto.grpc.testing import messages_pb2 from src.proto.grpc.testing import services_pb2 from tests.unit import resources -from tests.unit.beta import test_utilities +from tests.unit import test_common _TIMEOUT = 60 * 60 * 24 +class GenericStub(object): + + def __init__(self, channel): + self.UnaryCall = channel.unary_unary( + '/grpc.testing.BenchmarkService/UnaryCall') + self.StreamingCall = channel.stream_stream( + '/grpc.testing.BenchmarkService/StreamingCall') + + class BenchmarkClient: """Benchmark client interface that exposes a non-blocking send_request().""" @@ -54,15 +61,12 @@ class BenchmarkClient: def __init__(self, server, config, hist): # Create the stub - host, port = server.split(':') - port = int(port) if config.HasField('security_params'): - creds = implementations.ssl_channel_credentials( - resources.test_root_certificates()) - channel = test_utilities.not_really_secure_channel( - host, port, creds, config.security_params.server_host_override) + creds = grpc.ssl_channel_credentials(resources.test_root_certificates()) + channel = test_common.test_secure_channel( + server, creds, config.security_params.server_host_override) else: - channel = implementations.insecure_channel(host, port) + channel = grpc.insecure_channel(server) connected_event = threading.Event() def wait_for_ready(connectivity): @@ -73,7 +77,7 @@ class BenchmarkClient: if config.payload_config.WhichOneof('payload') == 'simple_params': self._generic = False - self._stub = services_pb2.beta_create_BenchmarkService_stub(channel) + self._stub = services_pb2.BenchmarkServiceStub(channel) payload = messages_pb2.Payload( body='\0' * config.payload_config.simple_params.req_size) self._request = messages_pb2.SimpleRequest( @@ -81,7 +85,7 @@ class BenchmarkClient: response_size=config.payload_config.simple_params.resp_size) else: self._generic = True - self._stub = implementations.generic_stub(channel) + self._stub = GenericStub(channel) self._request = '\0' * config.payload_config.bytebuf_params.req_size self._hist = hist @@ -166,13 +170,8 @@ class _SyncStream(object): def start(self): self._is_streaming = True - if self._generic: - stream_callable = self._stub.stream_stream( - 'grpc.testing.BenchmarkService', 'StreamingCall') - else: - stream_callable = self._stub.StreamingCall - - response_stream = stream_callable(self._request_generator(), _TIMEOUT) + response_stream = self._stub.StreamingCall( + self._request_generator(), _TIMEOUT) for _ in response_stream: self._handle_response( self, time.time() - 
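The qps benchmark client above is moved off the beta implementations/face layers onto top-level grpc channels; for the generic payload path it builds per-method callables directly on the channel. A sketch of that generic-stub pattern (the address is illustrative, the method paths are the ones used in the hunk above, and with no serializers supplied the callables take and return raw bytes):

    import grpc

    channel = grpc.insecure_channel('localhost:50051')  # illustrative address

    # Serializer-less callables: requests and responses are plain bytes.
    unary_call = channel.unary_unary(
        '/grpc.testing.BenchmarkService/UnaryCall')
    streaming_call = channel.stream_stream(
        '/grpc.testing.BenchmarkService/StreamingCall')

    # Example invocation against a running benchmark server:
    # response = unary_call(b'\x00' * 16, timeout=10)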
self._send_time_queue.get_nowait()) diff --git a/src/python/grpcio_tests/tests/qps/benchmark_server.py b/src/python/grpcio_tests/tests/qps/benchmark_server.py index 8cbf480d58..2b76b810cd 100644 --- a/src/python/grpcio_tests/tests/qps/benchmark_server.py +++ b/src/python/grpcio_tests/tests/qps/benchmark_server.py @@ -31,7 +31,7 @@ from src.proto.grpc.testing import messages_pb2 from src.proto.grpc.testing import services_pb2 -class BenchmarkServer(services_pb2.BetaBenchmarkServiceServicer): +class BenchmarkServer(services_pb2.BenchmarkServiceServicer): """Synchronous Server implementation for the Benchmark service.""" def UnaryCall(self, request, context): @@ -44,7 +44,7 @@ class BenchmarkServer(services_pb2.BetaBenchmarkServiceServicer): yield messages_pb2.SimpleResponse(payload=payload) -class GenericBenchmarkServer(services_pb2.BetaBenchmarkServiceServicer): +class GenericBenchmarkServer(services_pb2.BenchmarkServiceServicer): """Generic Server implementation for the Benchmark service.""" def __init__(self, resp_size): diff --git a/src/python/grpcio_tests/tests/qps/qps_worker.py b/src/python/grpcio_tests/tests/qps/qps_worker.py index 16926379a5..2371ff0956 100644 --- a/src/python/grpcio_tests/tests/qps/qps_worker.py +++ b/src/python/grpcio_tests/tests/qps/qps_worker.py @@ -32,18 +32,21 @@ import argparse import time +from concurrent import futures +import grpc from src.proto.grpc.testing import services_pb2 from tests.qps import worker_server def run_worker_server(port): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=5)) servicer = worker_server.WorkerServer() - server = services_pb2.beta_create_WorkerService_server(servicer) + services_pb2.add_WorkerServiceServicer_to_server(servicer, server) server.add_insecure_port('[::]:{}'.format(port)) server.start() servicer.wait_for_quit() - server.stop(2) + server.stop(0) if __name__ == '__main__': diff --git a/src/python/grpcio_tests/tests/qps/worker_server.py b/src/python/grpcio_tests/tests/qps/worker_server.py index d41f8377c2..46d542940f 100644 --- a/src/python/grpcio_tests/tests/qps/worker_server.py +++ b/src/python/grpcio_tests/tests/qps/worker_server.py @@ -32,8 +32,8 @@ import random import threading import time -from grpc.beta import implementations -from grpc.framework.interfaces.face import utilities +from concurrent import futures +import grpc from src.proto.grpc.testing import control_pb2 from src.proto.grpc.testing import services_pb2 from src.proto.grpc.testing import stats_pb2 @@ -45,7 +45,7 @@ from tests.qps import histogram from tests.unit import resources -class WorkerServer(services_pb2.BetaWorkerServiceServicer): +class WorkerServer(services_pb2.WorkerServiceServicer): """Python Worker Server implementation.""" def __init__(self): @@ -65,7 +65,7 @@ class WorkerServer(services_pb2.BetaWorkerServiceServicer): if request.mark.reset: start_time = end_time yield status - server.stop(0) + server.stop(None) def _get_server_status(self, start_time, end_time, port, cores): end_time = time.time() @@ -76,25 +76,35 @@ class WorkerServer(services_pb2.BetaWorkerServiceServicer): return control_pb2.ServerStatus(stats=stats, port=port, cores=cores) def _create_server(self, config): - if config.server_type == control_pb2.SYNC_SERVER: + if config.async_server_threads == 0: + # This is the default concurrent.futures thread pool size, but + # None doesn't seem to work + server_threads = multiprocessing.cpu_count() * 5 + else: + server_threads = config.async_server_threads + server = grpc.server(futures.ThreadPoolExecutor( + 
max_workers=server_threads)) + if config.server_type == control_pb2.ASYNC_SERVER: servicer = benchmark_server.BenchmarkServer() - server = services_pb2.beta_create_BenchmarkService_server(servicer) + services_pb2.add_BenchmarkServiceServicer_to_server(servicer, server) elif config.server_type == control_pb2.ASYNC_GENERIC_SERVER: resp_size = config.payload_config.bytebuf_params.resp_size servicer = benchmark_server.GenericBenchmarkServer(resp_size) method_implementations = { - ('grpc.testing.BenchmarkService', 'StreamingCall'): - utilities.stream_stream_inline(servicer.StreamingCall), - ('grpc.testing.BenchmarkService', 'UnaryCall'): - utilities.unary_unary_inline(servicer.UnaryCall), + 'StreamingCall': + grpc.stream_stream_rpc_method_handler(servicer.StreamingCall), + 'UnaryCall': + grpc.unary_unary_rpc_method_handler(servicer.UnaryCall), } - server = implementations.server(method_implementations) + handler = grpc.method_handlers_generic_handler( + 'grpc.testing.BenchmarkService', method_implementations) + server.add_generic_rpc_handlers((handler,)) else: raise Exception('Unsupported server type {}'.format(config.server_type)) if config.HasField('security_params'): # Use SSL - server_creds = implementations.ssl_server_credentials([( - resources.private_key(), resources.certificate_chain())]) + server_creds = grpc.ssl_server_credentials( + ((resources.private_key(), resources.certificate_chain()),)) port = server.add_secure_port('[::]:{}'.format(config.port), server_creds) else: port = server.add_insecure_port('[::]:{}'.format(config.port)) diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json index 45eb75b242..dcaef0db1f 100644 --- a/src/python/grpcio_tests/tests/tests.json +++ b/src/python/grpcio_tests/tests/tests.json @@ -12,6 +12,7 @@ "_channel_test.ChannelTest", "_compression_test.CompressionTest", "_connectivity_channel_test.ConnectivityStatesTest", + "_credentials_test.CredentialsTest", "_empty_message_test.EmptyMessageTest", "_exit_test.ExitTest", "_face_interface_test.DynamicInvokerBlockingInvocationInlineServiceTest", diff --git a/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py index ae8de523ec..3c00f686ce 100644 --- a/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py +++ b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py @@ -104,7 +104,7 @@ class ChannelConnectivityTest(unittest.TestCase): grpc.ChannelConnectivity.READY, fifth_connectivities) def test_immediately_connectable_channel_connectivity(self): - server = _server.Server((), futures.ThreadPoolExecutor(max_workers=0)) + server = _server.Server(futures.ThreadPoolExecutor(max_workers=0), ()) port = server.add_insecure_port('[::]:0') server.start() first_callback = _Callback() @@ -143,7 +143,7 @@ class ChannelConnectivityTest(unittest.TestCase): grpc.ChannelConnectivity.SHUTDOWN, fourth_connectivities) def test_reachable_then_unreachable_channel_connectivity(self): - server = _server.Server((), futures.ThreadPoolExecutor(max_workers=0)) + server = _server.Server(futures.ThreadPoolExecutor(max_workers=0), ()) port = server.add_insecure_port('[::]:0') server.start() callback = _Callback() diff --git a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py index b84bc0197a..e8982ed2de 100644 --- a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py +++ 
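The worker server above now registers new-style rpc method handlers instead of beta utilities.*_inline implementations: behaviours are wrapped with grpc.*_rpc_method_handler, bundled into a generic handler for the service name, and attached with add_generic_rpc_handlers; TLS uses grpc.ssl_server_credentials with (private key, certificate chain) pairs. A sketch under the assumption of a trivial bytes-echo behaviour:

    from concurrent import futures
    import grpc

    def unary_echo(request, context):
        # Illustrative behaviour: echo the raw request bytes back.
        return request

    handler = grpc.method_handlers_generic_handler(
        'grpc.testing.BenchmarkService',
        {'UnaryCall': grpc.unary_unary_rpc_method_handler(unary_echo)})

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    server.add_generic_rpc_handlers((handler,))
    port = server.add_insecure_port('[::]:0')
    # TLS variant (PEM bytes assumed to be available):
    # creds = grpc.ssl_server_credentials(((private_key_pem, cert_chain_pem),))
    # port = server.add_secure_port('[::]:0', creds)
    server.start()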
b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py @@ -78,7 +78,7 @@ class ChannelReadyFutureTest(unittest.TestCase): self.assertFalse(ready_future.running()) def test_immediately_connectable_channel_connectivity(self): - server = _server.Server((), futures.ThreadPoolExecutor(max_workers=0)) + server = _server.Server(futures.ThreadPoolExecutor(max_workers=0), ()) port = server.add_insecure_port('[::]:0') server.start() channel = grpc.insecure_channel('localhost:{}'.format(port)) diff --git a/src/python/grpcio_tests/tests/unit/_compression_test.py b/src/python/grpcio_tests/tests/unit/_compression_test.py index 9e8b8578c1..83b9109466 100644 --- a/src/python/grpcio_tests/tests/unit/_compression_test.py +++ b/src/python/grpcio_tests/tests/unit/_compression_test.py @@ -88,7 +88,8 @@ class CompressionTest(unittest.TestCase): def setUp(self): self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) - self._server = grpc.server((_GenericHandler(),), self._server_pool) + self._server = grpc.server( + self._server_pool, handlers=(_GenericHandler(),)) self._port = self._server.add_insecure_port('[::]:0') self._server.start() diff --git a/src/python/grpcio_tests/tests/unit/_credentials_test.py b/src/python/grpcio_tests/tests/unit/_credentials_test.py new file mode 100644 index 0000000000..87af85a0b9 --- /dev/null +++ b/src/python/grpcio_tests/tests/unit/_credentials_test.py @@ -0,0 +1,72 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
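The _credentials_test.py file added here exercises credential composition in the new API: call credentials can be stacked with grpc.composite_call_credentials, and channel credentials can absorb any number of call credentials through grpc.composite_channel_credentials. A usage sketch of the same calls (the tokens and the commented target are placeholders):

    import grpc

    ssl_creds = grpc.ssl_channel_credentials()
    token_a = grpc.access_token_call_credentials('token-a')  # placeholder token
    token_b = grpc.access_token_call_credentials('token-b')  # placeholder token

    call_creds = grpc.composite_call_credentials(token_a, token_b)
    channel_creds = grpc.composite_channel_credentials(ssl_creds, call_creds)

    # channel = grpc.secure_channel('example.com:443', channel_creds)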
+ +"""Tests of credentials.""" + +import unittest + +import grpc + + +class CredentialsTest(unittest.TestCase): + + def test_call_credentials_composition(self): + first = grpc.access_token_call_credentials('abc') + second = grpc.access_token_call_credentials('def') + third = grpc.access_token_call_credentials('ghi') + + first_and_second = grpc.composite_call_credentials(first, second) + first_second_and_third = grpc.composite_call_credentials( + first, second, third) + + self.assertIsInstance(first_and_second, grpc.CallCredentials) + self.assertIsInstance(first_second_and_third, grpc.CallCredentials) + + def test_channel_credentials_composition(self): + first_call_credentials = grpc.access_token_call_credentials('abc') + second_call_credentials = grpc.access_token_call_credentials('def') + third_call_credentials = grpc.access_token_call_credentials('ghi') + channel_credentials = grpc.ssl_channel_credentials() + + channel_and_first = grpc.composite_channel_credentials( + channel_credentials, first_call_credentials) + channel_first_and_second = grpc.composite_channel_credentials( + channel_credentials, first_call_credentials, second_call_credentials) + channel_first_second_and_third = grpc.composite_channel_credentials( + channel_credentials, first_call_credentials, second_call_credentials, + third_call_credentials) + + self.assertIsInstance(channel_and_first, grpc.ChannelCredentials) + self.assertIsInstance(channel_first_and_second, grpc.ChannelCredentials) + self.assertIsInstance( + channel_first_second_and_third, grpc.ChannelCredentials) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py b/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py index cac0c8b3b9..cf212c5653 100644 --- a/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py +++ b/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py @@ -81,11 +81,11 @@ class _Handler(object): self._state.condition.wait() with self._lock: - self._call.start_batch( + self._call.start_server_batch( cygrpc.Operations( (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)), _RECEIVE_CLOSE_ON_SERVER_TAG) - self._call.start_batch( + self._call.start_server_batch( cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)), _RECEIVE_MESSAGE_TAG) first_event = self._completion_queue.poll() @@ -101,7 +101,7 @@ class _Handler(object): _EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!', _EMPTY_FLAGS), ) - self._call.start_batch( + self._call.start_server_batch( cygrpc.Operations(operations), _SERVER_COMPLETE_CALL_TAG) self._completion_queue.poll() self._completion_queue.poll() @@ -193,7 +193,7 @@ class CancelManyCallsTest(unittest.TestCase): cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) tag = 'client_complete_call_{0:04d}_tag'.format(index) - client_call.start_batch(cygrpc.Operations(operations), tag) + client_call.start_client_batch(cygrpc.Operations(operations), tag) client_due.add(tag) client_calls.append(client_call) diff --git a/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py index 27fcee0d6f..152d8edde3 100644 --- a/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py +++ b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py @@ -168,12 +168,12 @@ class 
ReadSomeButNotAllResponsesTest(unittest.TestCase): client_complete_rpc_tag = 'client_complete_rpc_tag' with client_condition: client_receive_initial_metadata_start_batch_result = ( - client_call.start_batch(cygrpc.Operations([ + client_call.start_client_batch(cygrpc.Operations([ cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS), ]), client_receive_initial_metadata_tag)) client_due.add(client_receive_initial_metadata_tag) client_complete_rpc_start_batch_result = ( - client_call.start_batch(cygrpc.Operations([ + client_call.start_client_batch(cygrpc.Operations([ cygrpc.operation_send_initial_metadata( _EMPTY_METADATA, _EMPTY_FLAGS), cygrpc.operation_send_close_from_client(_EMPTY_FLAGS), @@ -185,30 +185,30 @@ class ReadSomeButNotAllResponsesTest(unittest.TestCase): with server_call_condition: server_send_initial_metadata_start_batch_result = ( - server_rpc_event.operation_call.start_batch(cygrpc.Operations([ + server_rpc_event.operation_call.start_server_batch([ cygrpc.operation_send_initial_metadata( _EMPTY_METADATA, _EMPTY_FLAGS), - ]), server_send_initial_metadata_tag)) + ], server_send_initial_metadata_tag)) server_send_first_message_start_batch_result = ( - server_rpc_event.operation_call.start_batch(cygrpc.Operations([ + server_rpc_event.operation_call.start_server_batch([ cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS), - ]), server_send_first_message_tag)) + ], server_send_first_message_tag)) server_send_initial_metadata_event = server_call_driver.event_with_tag( server_send_initial_metadata_tag) server_send_first_message_event = server_call_driver.event_with_tag( server_send_first_message_tag) with server_call_condition: server_send_second_message_start_batch_result = ( - server_rpc_event.operation_call.start_batch(cygrpc.Operations([ + server_rpc_event.operation_call.start_server_batch([ cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS), - ]), server_send_second_message_tag)) + ], server_send_second_message_tag)) server_complete_rpc_start_batch_result = ( - server_rpc_event.operation_call.start_batch(cygrpc.Operations([ + server_rpc_event.operation_call.start_server_batch([ cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS), cygrpc.operation_send_status_from_server( cygrpc.Metadata(()), cygrpc.StatusCode.ok, b'test details', _EMPTY_FLAGS), - ]), server_complete_rpc_tag)) + ], server_complete_rpc_tag)) server_send_second_message_event = server_call_driver.event_with_tag( server_send_second_message_tag) server_complete_rpc_event = server_call_driver.event_with_tag( @@ -218,7 +218,7 @@ class ReadSomeButNotAllResponsesTest(unittest.TestCase): with client_condition: client_receive_first_message_tag = 'client_receive_first_message_tag' client_receive_first_message_start_batch_result = ( - client_call.start_batch(cygrpc.Operations([ + client_call.start_client_batch(cygrpc.Operations([ cygrpc.operation_receive_message(_EMPTY_FLAGS), ]), client_receive_first_message_tag)) client_due.add(client_receive_first_message_tag) diff --git a/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py index b740695e35..9d1dbc189b 100644 --- a/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py +++ b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py @@ -186,7 +186,8 @@ class ServerClientMixin(object): def performer(): tag = object() try: - call_result = call.start_batch(cygrpc.Operations(operations), tag) + call_result = call.start_client_batch( + cygrpc.Operations(operations), tag) 
self.assertEqual(cygrpc.CallError.ok, call_result) event = queue.poll(deadline) self.assertEqual(cygrpc.CompletionType.operation_complete, event.type) @@ -231,7 +232,7 @@ class ServerClientMixin(object): cygrpc.Metadatum(CLIENT_METADATA_ASCII_KEY, CLIENT_METADATA_ASCII_VALUE), cygrpc.Metadatum(CLIENT_METADATA_BIN_KEY, CLIENT_METADATA_BIN_VALUE)]) - client_start_batch_result = client_call.start_batch(cygrpc.Operations([ + client_start_batch_result = client_call.start_client_batch([ cygrpc.operation_send_initial_metadata(client_initial_metadata, _EMPTY_FLAGS), cygrpc.operation_send_message(REQUEST, _EMPTY_FLAGS), @@ -239,7 +240,7 @@ class ServerClientMixin(object): cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS), cygrpc.operation_receive_message(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS) - ]), client_call_tag) + ], client_call_tag) self.assertEqual(cygrpc.CallError.ok, client_start_batch_result) client_event_future = test_utilities.CompletionQueuePollFuture( self.client_completion_queue, cygrpc_deadline) @@ -268,7 +269,7 @@ class ServerClientMixin(object): server_trailing_metadata = cygrpc.Metadata([ cygrpc.Metadatum(SERVER_TRAILING_METADATA_KEY, SERVER_TRAILING_METADATA_VALUE)]) - server_start_batch_result = server_call.start_batch([ + server_start_batch_result = server_call.start_server_batch([ cygrpc.operation_send_initial_metadata(server_initial_metadata, _EMPTY_FLAGS), cygrpc.operation_receive_message(_EMPTY_FLAGS), diff --git a/src/python/grpcio_tests/tests/unit/_empty_message_test.py b/src/python/grpcio_tests/tests/unit/_empty_message_test.py index 8c7d697728..131f6e9452 100644 --- a/src/python/grpcio_tests/tests/unit/_empty_message_test.py +++ b/src/python/grpcio_tests/tests/unit/_empty_message_test.py @@ -103,7 +103,8 @@ class EmptyMessageTest(unittest.TestCase): def setUp(self): self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) - self._server = grpc.server((_GenericHandler(),), self._server_pool) + self._server = grpc.server( + self._server_pool, handlers=(_GenericHandler(),)) port = self._server.add_insecure_port('[::]:0') self._server.start() self._channel = grpc.insecure_channel('localhost:%d' % port) diff --git a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py index 24a2faef85..b33802bf57 100644 --- a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py +++ b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py @@ -184,11 +184,11 @@ if __name__ == '__main__': args = parser.parse_args() if args.scenario == UNSTARTED_SERVER: - server = grpc.server((), DaemonPool()) + server = grpc.server(DaemonPool()) if args.wait_for_interrupt: time.sleep(WAIT_TIME) elif args.scenario == RUNNING_SERVER: - server = grpc.server((), DaemonPool()) + server = grpc.server(DaemonPool()) port = server.add_insecure_port('[::]:0') server.start() if args.wait_for_interrupt: @@ -203,7 +203,7 @@ if __name__ == '__main__': if args.wait_for_interrupt: time.sleep(WAIT_TIME) elif args.scenario == POLL_CONNECTIVITY: - server = grpc.server((), DaemonPool()) + server = grpc.server(DaemonPool()) port = server.add_insecure_port('[::]:0') server.start() channel = grpc.insecure_channel('localhost:%d' % port) @@ -217,7 +217,7 @@ if __name__ == '__main__': else: handler = GenericHandler() - server = grpc.server((), DaemonPool()) + server = grpc.server(DaemonPool()) port = server.add_insecure_port('[::]:0') server.add_generic_rpc_handlers((handler,)) server.start() diff --git 
a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py index 0fd02d2a22..fb3e547781 100644 --- a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py +++ b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py @@ -189,7 +189,7 @@ class MetadataCodeDetailsTest(unittest.TestCase): self._servicer = _Servicer() self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) self._server = grpc.server( - (_generic_handler(self._servicer),), self._server_pool) + self._server_pool, handlers=(_generic_handler(self._servicer),)) port = self._server.add_insecure_port('[::]:0') self._server.start() diff --git a/src/python/grpcio_tests/tests/unit/_metadata_test.py b/src/python/grpcio_tests/tests/unit/_metadata_test.py index c637a28039..da73476929 100644 --- a/src/python/grpcio_tests/tests/unit/_metadata_test.py +++ b/src/python/grpcio_tests/tests/unit/_metadata_test.py @@ -161,8 +161,8 @@ class MetadataTest(unittest.TestCase): def setUp(self): self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) - self._server = grpc.server((_GenericHandler(weakref.proxy(self)),), - self._server_pool) + self._server = grpc.server( + self._server_pool, handlers=(_GenericHandler(weakref.proxy(self)),)) port = self._server.add_insecure_port('[::]:0') self._server.start() self._channel = grpc.insecure_channel('localhost:%d' % port, diff --git a/src/python/grpcio_tests/tests/unit/_rpc_test.py b/src/python/grpcio_tests/tests/unit/_rpc_test.py index c70d65a6df..59bf240d28 100644 --- a/src/python/grpcio_tests/tests/unit/_rpc_test.py +++ b/src/python/grpcio_tests/tests/unit/_rpc_test.py @@ -184,7 +184,7 @@ class RPCTest(unittest.TestCase): self._handler = _Handler(self._control) self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) - self._server = grpc.server((), self._server_pool) + self._server = grpc.server(self._server_pool) port = self._server.add_insecure_port('[::]:0') self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),)) self._server.start() diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py index 791620307b..d32208f9eb 100644 --- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py +++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py @@ -41,6 +41,7 @@ from concurrent import futures import six # test_interfaces is referenced from specification in this module. +from grpc.framework.foundation import future from grpc.framework.foundation import logging_pool from grpc.framework.interfaces.face import face from tests.unit.framework.common import test_constants @@ -159,6 +160,8 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. test_messages.verify(request, response, self) self.assertIs(callback.future(), response_future) + self.assertIsNone(response_future.exception()) + self.assertIsNone(response_future.traceback()) def testSuccessfulUnaryRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -191,6 +194,8 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. 
test_messages.verify(requests, response, self) self.assertIs(future_passed_to_callback, response_future) + self.assertIsNone(response_future.exception()) + self.assertIsNone(response_future.traceback()) def testSuccessfulStreamRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -301,6 +306,12 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. self.assertIs(callback.future(), response_future) self.assertFalse(cancel_method_return_value) self.assertTrue(response_future.cancelled()) + with self.assertRaises(future.CancelledError): + response_future.result() + with self.assertRaises(future.CancelledError): + response_future.exception() + with self.assertRaises(future.CancelledError): + response_future.traceback() def testCancelledUnaryRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -332,6 +343,12 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. self.assertIs(callback.future(), response_future) self.assertFalse(cancel_method_return_value) self.assertTrue(response_future.cancelled()) + with self.assertRaises(future.CancelledError): + response_future.result() + with self.assertRaises(future.CancelledError): + response_future.exception() + with self.assertRaises(future.CancelledError): + response_future.traceback() def testCancelledStreamRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -363,6 +380,9 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. response_future.exception(), face.ExpirationError) with self.assertRaises(face.ExpirationError): response_future.result() + self.assertIsInstance( + response_future.exception(), face.AbortionError) + self.assertIsNotNone(response_future.traceback()) def testExpiredUnaryRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -392,6 +412,9 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. response_future.exception(), face.ExpirationError) with self.assertRaises(face.ExpirationError): response_future.result() + self.assertIsInstance( + response_future.exception(), face.AbortionError) + self.assertIsNotNone(response_future.traceback()) def testExpiredStreamRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -426,6 +449,7 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. response_future.exception(), face.ExpirationError) with self.assertRaises(face.ExpirationError): response_future.result() + self.assertIsNotNone(response_future.traceback()) def testFailedUnaryRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -463,6 +487,7 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. 
response_future.exception(), face.ExpirationError) with self.assertRaises(face.ExpirationError): response_future.result() + self.assertIsNotNone(response_future.traceback()) def testFailedStreamRequestStreamResponse(self): for (group, method), test_messages_sequence in ( diff --git a/src/python/grpcio_tests/tests/unit/test_common.py b/src/python/grpcio_tests/tests/unit/test_common.py index c8886bf4ca..cd71bd80d7 100644 --- a/src/python/grpcio_tests/tests/unit/test_common.py +++ b/src/python/grpcio_tests/tests/unit/test_common.py @@ -31,6 +31,7 @@ import collections +import grpc import six INVOCATION_INITIAL_METADATA = (('0', 'abc'), ('1', 'def'), ('2', 'ghi'),) @@ -78,3 +79,24 @@ def metadata_transmitted(original_metadata, transmitted_metadata): return False else: return True + + +def test_secure_channel( + target, channel_credentials, server_host_override): + """Creates an insecure Channel to a remote host. + + Args: + host: The name of the remote host to which to connect. + port: The port of the remote host to which to connect. + channel_credentials: The implementations.ChannelCredentials with which to + connect. + server_host_override: The target name used for SSL host name checking. + + Returns: + An implementations.Channel to the remote host through which RPCs may be + conducted. + """ + channel = grpc.secure_channel( + target, channel_credentials, + (('grpc.ssl_target_name_override', server_host_override,),)) + return channel diff --git a/src/ruby/ext/grpc/rb_byte_buffer.c b/src/ruby/ext/grpc/rb_byte_buffer.c index 1172691116..61b7c30315 100644 --- a/src/ruby/ext/grpc/rb_byte_buffer.c +++ b/src/ruby/ext/grpc/rb_byte_buffer.c @@ -56,7 +56,10 @@ VALUE grpc_rb_byte_buffer_to_s(grpc_byte_buffer *buffer) { return Qnil; } rb_string = rb_str_buf_new(grpc_byte_buffer_length(buffer)); - grpc_byte_buffer_reader_init(&reader, buffer); + if (!grpc_byte_buffer_reader_init(&reader, buffer)) { + rb_raise(rb_eRuntimeError, "Error initializing byte buffer reader."); + return Qnil; + } while (grpc_byte_buffer_reader_next(&reader, &next) != 0) { rb_str_cat(rb_string, (const char *) GPR_SLICE_START_PTR(next), GPR_SLICE_LENGTH(next)); diff --git a/src/ruby/ext/grpc/rb_call.c b/src/ruby/ext/grpc/rb_call.c index b436057c16..2126124443 100644 --- a/src/ruby/ext/grpc/rb_call.c +++ b/src/ruby/ext/grpc/rb_call.c @@ -63,27 +63,18 @@ static VALUE grpc_rb_sBatchResult; * grpc_metadata_array. */ static VALUE grpc_rb_cMdAry; -/* id_cq is the name of the hidden ivar that preserves a reference to a - * completion queue */ -static ID id_cq; - -/* id_flags is the name of the hidden ivar that preserves the value of - * the flags used to create metadata from a Hash */ -static ID id_flags; - /* id_credentials is the name of the hidden ivar that preserves the value * of the credentials added to the call */ static ID id_credentials; -/* id_input_md is the name of the hidden ivar that preserves the hash used to - * create metadata, so that references to the strings it contains last as long - * as the call the metadata is added to. */ -static ID id_input_md; - /* id_metadata is name of the attribute used to access the metadata hash * received by the call and subsequently saved on it. */ static ID id_metadata; +/* id_trailing_metadata is the name of the attribute used to access the trailing + * metadata hash received by the call and subsequently saved on it. */ +static ID id_trailing_metadata; + /* id_status is name of the attribute used to access the status object * received by the call and subsequently saved on it. 
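The test_common.test_secure_channel helper added above shows how a TLS channel is pointed at a test server whose certificate names a different host: the grpc.ssl_target_name_override channel argument substitutes the name used for host checking. A minimal sketch (the target address and override name are illustrative):

    import grpc

    creds = grpc.ssl_channel_credentials()  # or credentials built from test roots
    channel = grpc.secure_channel(
        'localhost:50051', creds,
        options=(('grpc.ssl_target_name_override', 'foo.test.google.fr'),))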
*/ static ID id_status; @@ -101,14 +92,27 @@ static VALUE sym_message; static VALUE sym_status; static VALUE sym_cancelled; +typedef struct grpc_rb_call { + grpc_call *wrapped; + grpc_completion_queue *queue; +} grpc_rb_call; + +static void destroy_call(grpc_rb_call *call) { + /* Ensure that we only try to destroy the call once */ + if (call->wrapped != NULL) { + grpc_call_destroy(call->wrapped); + call->wrapped = NULL; + grpc_rb_completion_queue_destroy(call->queue); + call->queue = NULL; + } +} + /* Destroys a Call. */ static void grpc_rb_call_destroy(void *p) { - grpc_call* call = NULL; if (p == NULL) { return; } - call = (grpc_call *)p; - grpc_call_destroy(call); + destroy_call((grpc_rb_call*)p); } static size_t md_ary_datasize(const void *p) { @@ -167,15 +171,15 @@ const char *grpc_call_error_detail_of(grpc_call_error err) { /* Called by clients to cancel an RPC on the server. Can be called multiple times, from any thread. */ static VALUE grpc_rb_call_cancel(VALUE self) { - grpc_call *call = NULL; + grpc_rb_call *call = NULL; grpc_call_error err; if (RTYPEDDATA_DATA(self) == NULL) { //This call has been closed return Qnil; } - TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call); - err = grpc_call_cancel(call, NULL); + TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call); + err = grpc_call_cancel(call->wrapped, NULL); if (err != GRPC_CALL_OK) { rb_raise(grpc_rb_eCallError, "cancel failed: %s (code=%d)", grpc_call_error_detail_of(err), err); @@ -189,10 +193,10 @@ static VALUE grpc_rb_call_cancel(VALUE self) { processed. */ static VALUE grpc_rb_call_close(VALUE self) { - grpc_call *call = NULL; - TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call); + grpc_rb_call *call = NULL; + TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call); if(call != NULL) { - grpc_call_destroy(call); + destroy_call(call); RTYPEDDATA_DATA(self) = NULL; } return Qnil; @@ -201,14 +205,14 @@ static VALUE grpc_rb_call_close(VALUE self) { /* Called to obtain the peer that this call is connected to. */ static VALUE grpc_rb_call_get_peer(VALUE self) { VALUE res = Qnil; - grpc_call *call = NULL; + grpc_rb_call *call = NULL; char *peer = NULL; if (RTYPEDDATA_DATA(self) == NULL) { rb_raise(grpc_rb_eCallError, "Cannot get peer value on closed call"); return Qnil; } - TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call); - peer = grpc_call_get_peer(call); + TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call); + peer = grpc_call_get_peer(call->wrapped); res = rb_str_new2(peer); gpr_free(peer); @@ -217,16 +221,16 @@ static VALUE grpc_rb_call_get_peer(VALUE self) { /* Called to obtain the x509 cert of an authenticated peer. 
*/ static VALUE grpc_rb_call_get_peer_cert(VALUE self) { - grpc_call *call = NULL; + grpc_rb_call *call = NULL; VALUE res = Qnil; grpc_auth_context *ctx = NULL; if (RTYPEDDATA_DATA(self) == NULL) { rb_raise(grpc_rb_eCallError, "Cannot get peer cert on closed call"); return Qnil; } - TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call); + TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call); - ctx = grpc_call_auth_context(call); + ctx = grpc_call_auth_context(call->wrapped); if (!ctx || !grpc_auth_context_peer_is_authenticated(ctx)) { return Qnil; @@ -298,6 +302,30 @@ static VALUE grpc_rb_call_set_metadata(VALUE self, VALUE metadata) { /* call-seq: + trailing_metadata = call.trailing_metadata + + Gets the trailing metadata object saved on the call */ +static VALUE grpc_rb_call_get_trailing_metadata(VALUE self) { + return rb_ivar_get(self, id_trailing_metadata); +} + +/* + call-seq: + call.trailing_metadata = trailing_metadata + + Saves the trailing metadata hash on the call. */ +static VALUE grpc_rb_call_set_trailing_metadata(VALUE self, VALUE metadata) { + if (!NIL_P(metadata) && TYPE(metadata) != T_HASH) { + rb_raise(rb_eTypeError, "bad metadata: got:<%s> want: <Hash>", + rb_obj_classname(metadata)); + return Qnil; + } + + return rb_ivar_set(self, id_trailing_metadata, metadata); +} + +/* + call-seq: write_flag = call.write_flag Gets the write_flag value saved the call. */ @@ -326,21 +354,23 @@ static VALUE grpc_rb_call_set_write_flag(VALUE self, VALUE write_flag) { Sets credentials on a call */ static VALUE grpc_rb_call_set_credentials(VALUE self, VALUE credentials) { - grpc_call *call = NULL; + grpc_rb_call *call = NULL; grpc_call_credentials *creds; grpc_call_error err; if (RTYPEDDATA_DATA(self) == NULL) { rb_raise(grpc_rb_eCallError, "Cannot set credentials of closed call"); return Qnil; } - TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call); + TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call); creds = grpc_rb_get_wrapped_call_credentials(credentials); - err = grpc_call_set_credentials(call, creds); + err = grpc_call_set_credentials(call->wrapped, creds); if (err != GRPC_CALL_OK) { rb_raise(grpc_rb_eCallError, "grpc_call_set_credentials failed with %s (code=%d)", grpc_call_error_detail_of(err), err); } + /* We need the credentials to be alive for as long as the call is alive, + but we don't care about destruction order. */ rb_ivar_set(self, id_credentials, credentials); return Qnil; } @@ -733,7 +763,6 @@ static VALUE grpc_run_batch_stack_build_result(run_batch_stack *st) { } /* call-seq: - cq = CompletionQueue.new ops = { GRPC::Core::CallOps::SEND_INITIAL_METADATA => <op_value>, GRPC::Core::CallOps::SEND_MESSAGE => <op_value>, @@ -741,7 +770,7 @@ static VALUE grpc_run_batch_stack_build_result(run_batch_stack *st) { } tag = Object.new timeout = 10 - call.start_batch(cq, tag, timeout, ops) + call.start_batch(tag, timeout, ops) Start a batch of operations defined in the array ops; when complete, post a completion of type 'tag' to the completion queue bound to the call. @@ -750,20 +779,20 @@ static VALUE grpc_run_batch_stack_build_result(run_batch_stack *st) { The order of ops specified in the batch has no significance. 
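With this change each Call owns its own completion queue, so Call#run_batch no longer takes a queue, tag, or timeout; it accepts only the ops hash and blocks until the batch completes. The following is a minimal client-side sketch of the new usage; the names call and payload are placeholders for a call obtained from Channel#create_call and an already marshalled request, and are not taken from this diff.

    ops = {
      GRPC::Core::CallOps::SEND_INITIAL_METADATA => {},
      GRPC::Core::CallOps::SEND_MESSAGE => payload,
      GRPC::Core::CallOps::SEND_CLOSE_FROM_CLIENT => nil
    }
    batch_result = call.run_batch(ops)  # blocks on the call's private queue
    # the trailing status arrives via a second batch
    status = call.run_batch(GRPC::Core::CallOps::RECV_STATUS_ON_CLIENT => nil).status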
Only one operation of each type can be active at once in any given batch */ -static VALUE grpc_rb_call_run_batch(VALUE self, VALUE cqueue, VALUE tag, - VALUE timeout, VALUE ops_hash) { +static VALUE grpc_rb_call_run_batch(VALUE self, VALUE ops_hash) { run_batch_stack st; - grpc_call *call = NULL; + grpc_rb_call *call = NULL; grpc_event ev; grpc_call_error err; VALUE result = Qnil; VALUE rb_write_flag = rb_ivar_get(self, id_write_flag); unsigned write_flag = 0; + void *tag = (void*)&st; if (RTYPEDDATA_DATA(self) == NULL) { rb_raise(grpc_rb_eCallError, "Cannot run batch on closed call"); return Qnil; } - TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call); + TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call); /* Validate the ops args, adding them to a ruby array */ if (TYPE(ops_hash) != T_HASH) { @@ -778,7 +807,7 @@ static VALUE grpc_rb_call_run_batch(VALUE self, VALUE cqueue, VALUE tag, /* call grpc_call_start_batch, then wait for it to complete using * pluck_event */ - err = grpc_call_start_batch(call, st.ops, st.op_num, ROBJECT(tag), NULL); + err = grpc_call_start_batch(call->wrapped, st.ops, st.op_num, tag, NULL); if (err != GRPC_CALL_OK) { grpc_run_batch_stack_cleanup(&st); rb_raise(grpc_rb_eCallError, @@ -786,13 +815,11 @@ static VALUE grpc_rb_call_run_batch(VALUE self, VALUE cqueue, VALUE tag, grpc_call_error_detail_of(err), err); return Qnil; } - ev = grpc_rb_completion_queue_pluck_event(cqueue, tag, timeout); - if (ev.type == GRPC_QUEUE_TIMEOUT) { - grpc_run_batch_stack_cleanup(&st); - rb_raise(grpc_rb_eOutOfTime, "grpc_call_start_batch timed out"); - return Qnil; + ev = rb_completion_queue_pluck(call->queue, tag, + gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + if (!ev.success) { + rb_raise(grpc_rb_eCallError, "call#run_batch failed somehow"); } - /* Build and return the BatchResult struct result, if there is an error, it's reflected in the status */ result = grpc_run_batch_stack_build_result(&st); @@ -900,7 +927,7 @@ void Init_grpc_call() { 1); /* Add ruby analogues of the Call methods. */ - rb_define_method(grpc_rb_cCall, "run_batch", grpc_rb_call_run_batch, 4); + rb_define_method(grpc_rb_cCall, "run_batch", grpc_rb_call_run_batch, 1); rb_define_method(grpc_rb_cCall, "cancel", grpc_rb_call_cancel, 0); rb_define_method(grpc_rb_cCall, "close", grpc_rb_call_close, 0); rb_define_method(grpc_rb_cCall, "peer", grpc_rb_call_get_peer, 0); @@ -909,6 +936,10 @@ void Init_grpc_call() { rb_define_method(grpc_rb_cCall, "status=", grpc_rb_call_set_status, 1); rb_define_method(grpc_rb_cCall, "metadata", grpc_rb_call_get_metadata, 0); rb_define_method(grpc_rb_cCall, "metadata=", grpc_rb_call_set_metadata, 1); + rb_define_method(grpc_rb_cCall, "trailing_metadata", + grpc_rb_call_get_trailing_metadata, 0); + rb_define_method(grpc_rb_cCall, "trailing_metadata=", + grpc_rb_call_set_trailing_metadata, 1); rb_define_method(grpc_rb_cCall, "write_flag", grpc_rb_call_get_write_flag, 0); rb_define_method(grpc_rb_cCall, "write_flag=", grpc_rb_call_set_write_flag, 1); @@ -917,13 +948,11 @@ void Init_grpc_call() { /* Ids used to support call attributes */ id_metadata = rb_intern("metadata"); + id_trailing_metadata = rb_intern("trailing_metadata"); id_status = rb_intern("status"); id_write_flag = rb_intern("write_flag"); /* Ids used by the c wrapping internals. */ - id_cq = rb_intern("__cq"); - id_flags = rb_intern("__flags"); - id_input_md = rb_intern("__input_md"); id_credentials = rb_intern("__credentials"); /* Ids used in constructing the batch result. 
*/ @@ -947,15 +976,19 @@ void Init_grpc_call() { /* Gets the call from the ruby object */ grpc_call *grpc_rb_get_wrapped_call(VALUE v) { - grpc_call *c = NULL; - TypedData_Get_Struct(v, grpc_call, &grpc_call_data_type, c); - return c; + grpc_rb_call *call = NULL; + TypedData_Get_Struct(v, grpc_rb_call, &grpc_call_data_type, call); + return call->wrapped; } /* Obtains the wrapped object for a given call */ -VALUE grpc_rb_wrap_call(grpc_call *c) { - if (c == NULL) { +VALUE grpc_rb_wrap_call(grpc_call *c, grpc_completion_queue *q) { + grpc_rb_call *wrapper; + if (c == NULL || q == NULL) { return Qnil; } - return TypedData_Wrap_Struct(grpc_rb_cCall, &grpc_call_data_type, c); + wrapper = ALLOC(grpc_rb_call); + wrapper->wrapped = c; + wrapper->queue = q; + return TypedData_Wrap_Struct(grpc_rb_cCall, &grpc_call_data_type, wrapper); } diff --git a/src/ruby/ext/grpc/rb_call.h b/src/ruby/ext/grpc/rb_call.h index 24adb3477b..56becdc5a4 100644 --- a/src/ruby/ext/grpc/rb_call.h +++ b/src/ruby/ext/grpc/rb_call.h @@ -42,7 +42,7 @@ grpc_call* grpc_rb_get_wrapped_call(VALUE v); /* Gets the VALUE corresponding to given grpc_call. */ -VALUE grpc_rb_wrap_call(grpc_call* c); +VALUE grpc_rb_wrap_call(grpc_call *c, grpc_completion_queue *q); /* Provides the details of an call error */ const char* grpc_call_error_detail_of(grpc_call_error err); diff --git a/src/ruby/ext/grpc/rb_call_credentials.c b/src/ruby/ext/grpc/rb_call_credentials.c index 79ca5b32ce..9b6675da84 100644 --- a/src/ruby/ext/grpc/rb_call_credentials.c +++ b/src/ruby/ext/grpc/rb_call_credentials.c @@ -211,35 +211,6 @@ VALUE grpc_rb_wrap_call_credentials(grpc_call_credentials *c, VALUE mark) { return rb_wrapper; } -/* Clones CallCredentials instances. - Gives CallCredentials a consistent implementation of Ruby's object copy/dup - protocol. */ -static VALUE grpc_rb_call_credentials_init_copy(VALUE copy, VALUE orig) { - grpc_rb_call_credentials *orig_cred = NULL; - grpc_rb_call_credentials *copy_cred = NULL; - - if (copy == orig) { - return copy; - } - - /* Raise an error if orig is not a credentials object or a subclass. */ - if (TYPE(orig) != T_DATA || - RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_call_credentials_free) { - rb_raise(rb_eTypeError, "not a %s", - rb_obj_classname(grpc_rb_cCallCredentials)); - } - - TypedData_Get_Struct(orig, grpc_rb_call_credentials, - &grpc_rb_call_credentials_data_type, orig_cred); - TypedData_Get_Struct(copy, grpc_rb_call_credentials, - &grpc_rb_call_credentials_data_type, copy_cred); - - /* use ruby's MEMCPY to make a byte-for-byte copy of the credentials - * wrapper object. 
*/ - MEMCPY(copy_cred, orig_cred, grpc_rb_call_credentials, 1); - return copy; -} - /* The attribute used on the mark object to hold the callback */ static ID id_callback; @@ -308,7 +279,7 @@ void Init_grpc_call_credentials() { rb_define_method(grpc_rb_cCallCredentials, "initialize", grpc_rb_call_credentials_init, 1); rb_define_method(grpc_rb_cCallCredentials, "initialize_copy", - grpc_rb_call_credentials_init_copy, 1); + grpc_rb_cannot_init_copy, 1); rb_define_method(grpc_rb_cCallCredentials, "compose", grpc_rb_call_credentials_compose, -1); diff --git a/src/ruby/ext/grpc/rb_channel.c b/src/ruby/ext/grpc/rb_channel.c index 6943c93d4a..18a15d0125 100644 --- a/src/ruby/ext/grpc/rb_channel.c +++ b/src/ruby/ext/grpc/rb_channel.c @@ -39,6 +39,7 @@ #include <grpc/grpc.h> #include <grpc/grpc_security.h> #include <grpc/support/alloc.h> +#include <grpc/support/log.h> #include "rb_grpc.h" #include "rb_call.h" #include "rb_channel_args.h" @@ -55,11 +56,6 @@ static ID id_channel; * GCed before the channel */ static ID id_target; -/* id_cqueue is the name of the hidden ivar that preserves a reference to the - * completion queue used to create the call, preserved so that it does not get - * GCed before the channel */ -static ID id_cqueue; - /* id_insecure_channel is used to indicate that a channel is insecure */ static VALUE id_insecure_channel; @@ -231,40 +227,11 @@ static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self, return Qnil; } -/* Clones Channel instances. - - Gives Channel a consistent implementation of Ruby's object copy/dup - protocol. */ -static VALUE grpc_rb_channel_init_copy(VALUE copy, VALUE orig) { - grpc_rb_channel *orig_ch = NULL; - grpc_rb_channel *copy_ch = NULL; - - if (copy == orig) { - return copy; - } - - /* Raise an error if orig is not a channel object or a subclass. */ - if (TYPE(orig) != T_DATA || - RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_channel_free) { - rb_raise(rb_eTypeError, "not a %s", rb_obj_classname(grpc_rb_cChannel)); - return Qnil; - } - - TypedData_Get_Struct(orig, grpc_rb_channel, &grpc_channel_data_type, orig_ch); - TypedData_Get_Struct(copy, grpc_rb_channel, &grpc_channel_data_type, copy_ch); - - /* use ruby's MEMCPY to make a byte-for-byte copy of the channel wrapper - * object. */ - MEMCPY(copy_ch, orig_ch, grpc_rb_channel, 1); - return copy; -} - /* Create a call given a grpc_channel, in order to call method. The request is not sent until grpc_call_invoke is called. */ -static VALUE grpc_rb_channel_create_call(VALUE self, VALUE cqueue, - VALUE parent, VALUE mask, - VALUE method, VALUE host, - VALUE deadline) { +static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, + VALUE mask, VALUE method, + VALUE host, VALUE deadline) { VALUE res = Qnil; grpc_rb_channel *wrapper = NULL; grpc_call *call = NULL; @@ -284,7 +251,7 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE cqueue, parent_call = grpc_rb_get_wrapped_call(parent); } - cq = grpc_rb_get_wrapped_completion_queue(cqueue); + cq = grpc_completion_queue_create(NULL); TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper); ch = wrapper->wrapped; if (ch == NULL) { @@ -301,15 +268,11 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE cqueue, method_chars); return Qnil; } - res = grpc_rb_wrap_call(call); + res = grpc_rb_wrap_call(call, cq); /* Make this channel an instance attribute of the call so that it is not GCed * before the call. 
*/ rb_ivar_set(res, id_channel, self); - - /* Make the completion queue an instance attribute of the call so that it is - * not GCed before the call. */ - rb_ivar_set(res, id_cqueue, cqueue); return res; } @@ -387,7 +350,7 @@ void Init_grpc_channel() { /* Provides a ruby constructor and support for dup/clone. */ rb_define_method(grpc_rb_cChannel, "initialize", grpc_rb_channel_init, -1); rb_define_method(grpc_rb_cChannel, "initialize_copy", - grpc_rb_channel_init_copy, 1); + grpc_rb_cannot_init_copy, 1); /* Add ruby analogues of the Channel methods. */ rb_define_method(grpc_rb_cChannel, "connectivity_state", @@ -396,13 +359,12 @@ void Init_grpc_channel() { rb_define_method(grpc_rb_cChannel, "watch_connectivity_state", grpc_rb_channel_watch_connectivity_state, 4); rb_define_method(grpc_rb_cChannel, "create_call", - grpc_rb_channel_create_call, 6); + grpc_rb_channel_create_call, 5); rb_define_method(grpc_rb_cChannel, "target", grpc_rb_channel_get_target, 0); rb_define_method(grpc_rb_cChannel, "destroy", grpc_rb_channel_destroy, 0); rb_define_alias(grpc_rb_cChannel, "close", "destroy"); id_channel = rb_intern("__channel"); - id_cqueue = rb_intern("__cqueue"); id_target = rb_intern("__target"); rb_define_const(grpc_rb_cChannel, "SSL_TARGET", ID2SYM(rb_intern(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG))); diff --git a/src/ruby/ext/grpc/rb_channel_credentials.c b/src/ruby/ext/grpc/rb_channel_credentials.c index cbb23885aa..5b7aa3417e 100644 --- a/src/ruby/ext/grpc/rb_channel_credentials.c +++ b/src/ruby/ext/grpc/rb_channel_credentials.c @@ -126,36 +126,6 @@ VALUE grpc_rb_wrap_channel_credentials(grpc_channel_credentials *c, VALUE mark) return rb_wrapper; } -/* Clones ChannelCredentials instances. - Gives ChannelCredentials a consistent implementation of Ruby's object copy/dup - protocol. */ -static VALUE grpc_rb_channel_credentials_init_copy(VALUE copy, VALUE orig) { - grpc_rb_channel_credentials *orig_cred = NULL; - grpc_rb_channel_credentials *copy_cred = NULL; - - if (copy == orig) { - return copy; - } - - /* Raise an error if orig is not a credentials object or a subclass. */ - if (TYPE(orig) != T_DATA || - RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_channel_credentials_free) { - rb_raise(rb_eTypeError, "not a %s", - rb_obj_classname(grpc_rb_cChannelCredentials)); - } - - TypedData_Get_Struct(orig, grpc_rb_channel_credentials, - &grpc_rb_channel_credentials_data_type, orig_cred); - TypedData_Get_Struct(copy, grpc_rb_channel_credentials, - &grpc_rb_channel_credentials_data_type, copy_cred); - - /* use ruby's MEMCPY to make a byte-for-byte copy of the credentials - * wrapper object. */ - MEMCPY(copy_cred, orig_cred, grpc_rb_channel_credentials, 1); - return copy; -} - - /* The attribute used on the mark object to hold the pem_root_certs. 
*/ static ID id_pem_root_certs; @@ -271,7 +241,7 @@ void Init_grpc_channel_credentials() { rb_define_method(grpc_rb_cChannelCredentials, "initialize", grpc_rb_channel_credentials_init, -1); rb_define_method(grpc_rb_cChannelCredentials, "initialize_copy", - grpc_rb_channel_credentials_init_copy, 1); + grpc_rb_cannot_init_copy, 1); rb_define_method(grpc_rb_cChannelCredentials, "compose", grpc_rb_channel_credentials_compose, -1); rb_define_module_function(grpc_rb_cChannelCredentials, diff --git a/src/ruby/ext/grpc/rb_completion_queue.c b/src/ruby/ext/grpc/rb_completion_queue.c index 9466402db0..fd75d2f691 100644 --- a/src/ruby/ext/grpc/rb_completion_queue.c +++ b/src/ruby/ext/grpc/rb_completion_queue.c @@ -40,12 +40,9 @@ #include <grpc/grpc.h> #include <grpc/support/time.h> +#include <grpc/support/log.h> #include "rb_grpc.h" -/* grpc_rb_cCompletionQueue is the ruby class that proxies - * grpc_completion_queue. */ -static VALUE grpc_rb_cCompletionQueue = Qnil; - /* Used to allow grpc_completion_queue_next call to release the GIL */ typedef struct next_call_stack { grpc_completion_queue *cq; @@ -55,23 +52,6 @@ typedef struct next_call_stack { volatile int interrupted; } next_call_stack; -/* Calls grpc_completion_queue_next without holding the ruby GIL */ -static void *grpc_rb_completion_queue_next_no_gil(void *param) { - next_call_stack *const next_call = (next_call_stack*)param; - gpr_timespec increment = gpr_time_from_millis(20, GPR_TIMESPAN); - gpr_timespec deadline; - do { - deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), increment); - next_call->event = grpc_completion_queue_next(next_call->cq, - deadline, NULL); - if (next_call->event.type != GRPC_QUEUE_TIMEOUT || - gpr_time_cmp(deadline, next_call->timeout) > 0) { - break; - } - } while (!next_call->interrupted); - return NULL; -} - /* Calls grpc_completion_queue_pluck without holding the ruby GIL */ static void *grpc_rb_completion_queue_pluck_no_gil(void *param) { next_call_stack *const next_call = (next_call_stack*)param; @@ -90,107 +70,32 @@ static void *grpc_rb_completion_queue_pluck_no_gil(void *param) { return NULL; } -/* Shuts down and drains the completion queue if necessary. - * - * This is done when the ruby completion queue object is about to be GCed. - */ -static void grpc_rb_completion_queue_shutdown_drain(grpc_completion_queue *cq) { - next_call_stack next_call; - grpc_completion_type type; - int drained = 0; - MEMZERO(&next_call, next_call_stack, 1); - - grpc_completion_queue_shutdown(cq); - next_call.cq = cq; - next_call.event.type = GRPC_QUEUE_TIMEOUT; - /* TODO: the timeout should be a module level constant that defaults - * to gpr_inf_future(GPR_CLOCK_REALTIME). - * - * - at the moment this does not work, it stalls. Using a small timeout like - * this one works, and leads to fast test run times; a longer timeout was - * causing unnecessary delays in the test runs. - * - * - investigate further, this is probably another example of C-level cleanup - * not working consistently in all cases. - */ - next_call.timeout = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(5e3, GPR_TIMESPAN)); - do { - rb_thread_call_without_gvl(grpc_rb_completion_queue_next_no_gil, - (void *)&next_call, NULL, NULL); - type = next_call.event.type; - if (type == GRPC_QUEUE_TIMEOUT) break; - if (type != GRPC_QUEUE_SHUTDOWN) { - ++drained; - rb_warning("completion queue shutdown: %d undrained events", drained); - } - } while (type != GRPC_QUEUE_SHUTDOWN); -} - /* Helper function to free a completion queue. 
*/ -static void grpc_rb_completion_queue_destroy(void *p) { - grpc_completion_queue *cq = NULL; - if (p == NULL) { - return; - } - cq = (grpc_completion_queue *)p; - grpc_rb_completion_queue_shutdown_drain(cq); +void grpc_rb_completion_queue_destroy(grpc_completion_queue *cq) { + /* Every function that adds an event to a queue also synchronously plucks + that event from the queue, and holds a reference to the Ruby object that + holds the queue, so we only get to this point if all of those functions + have completed, and the queue is empty */ + grpc_completion_queue_shutdown(cq); grpc_completion_queue_destroy(cq); } -static rb_data_type_t grpc_rb_completion_queue_data_type = { - "grpc_completion_queue", - {GRPC_RB_GC_NOT_MARKED, grpc_rb_completion_queue_destroy, - GRPC_RB_MEMSIZE_UNAVAILABLE, {NULL, NULL}}, - NULL, NULL, -#ifdef RUBY_TYPED_FREE_IMMEDIATELY - /* cannot immediately free because grpc_rb_completion_queue_shutdown_drain - * calls rb_thread_call_without_gvl. */ - 0, -#endif -}; - -/* Releases the c-level resources associated with a completion queue */ -static VALUE grpc_rb_completion_queue_close(VALUE self) { - grpc_completion_queue* cq = grpc_rb_get_wrapped_completion_queue(self); - grpc_rb_completion_queue_destroy(cq); - RTYPEDDATA_DATA(self) = NULL; - return Qnil; -} - -/* Allocates a completion queue. */ -static VALUE grpc_rb_completion_queue_alloc(VALUE cls) { - grpc_completion_queue *cq = grpc_completion_queue_create(NULL); - if (cq == NULL) { - rb_raise(rb_eArgError, "could not create a completion queue: not sure why"); - } - return TypedData_Wrap_Struct(cls, &grpc_rb_completion_queue_data_type, cq); -} - static void unblock_func(void *param) { next_call_stack *const next_call = (next_call_stack*)param; next_call->interrupted = 1; } -/* Blocks until the next event for given tag is available, and returns the - * event. */ -grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag, - VALUE timeout) { +/* Does the same thing as grpc_completion_queue_pluck, while properly releasing + the GVL and handling interrupts */ +grpc_event rb_completion_queue_pluck(grpc_completion_queue *queue, void *tag, + gpr_timespec deadline, void *reserved) { next_call_stack next_call; MEMZERO(&next_call, next_call_stack, 1); - TypedData_Get_Struct(self, grpc_completion_queue, - &grpc_rb_completion_queue_data_type, next_call.cq); - if (TYPE(timeout) == T_NIL) { - next_call.timeout = gpr_inf_future(GPR_CLOCK_REALTIME); - } else { - next_call.timeout = grpc_rb_time_timeval(timeout, /* absolute time*/ 0); - } - if (TYPE(tag) == T_NIL) { - next_call.tag = NULL; - } else { - next_call.tag = ROBJECT(tag); - } + next_call.cq = queue; + next_call.timeout = deadline; + next_call.tag = tag; next_call.event.type = GRPC_QUEUE_TIMEOUT; + (void)reserved; /* Loop until we finish a pluck without an interruption. The internal pluck function runs either until it is interrupted or it gets an event, or time runs out. @@ -210,27 +115,3 @@ grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag, next_call.event.type == GRPC_QUEUE_TIMEOUT); return next_call.event; } - -void Init_grpc_completion_queue() { - grpc_rb_cCompletionQueue = - rb_define_class_under(grpc_rb_mGrpcCore, "CompletionQueue", rb_cObject); - - /* constructor: uses an alloc func without an initializer. Using a simple - alloc func works here as the grpc header does not specify any args for - this func, so no separate initialization step is necessary. 
*/ - rb_define_alloc_func(grpc_rb_cCompletionQueue, - grpc_rb_completion_queue_alloc); - - /* close: Provides a way to close the underlying file descriptor without - waiting for ruby garbage collection. */ - rb_define_method(grpc_rb_cCompletionQueue, "close", - grpc_rb_completion_queue_close, 0); -} - -/* Gets the wrapped completion queue from the ruby wrapper */ -grpc_completion_queue *grpc_rb_get_wrapped_completion_queue(VALUE v) { - grpc_completion_queue *cq = NULL; - TypedData_Get_Struct(v, grpc_completion_queue, - &grpc_rb_completion_queue_data_type, cq); - return cq; -} diff --git a/src/ruby/ext/grpc/rb_completion_queue.h b/src/ruby/ext/grpc/rb_completion_queue.h index 42de43c3fb..9f8f6aa5ff 100644 --- a/src/ruby/ext/grpc/rb_completion_queue.h +++ b/src/ruby/ext/grpc/rb_completion_queue.h @@ -41,15 +41,14 @@ /* Gets the wrapped completion queue from the ruby wrapper */ grpc_completion_queue *grpc_rb_get_wrapped_completion_queue(VALUE v); +void grpc_rb_completion_queue_destroy(grpc_completion_queue *cq); + /** * Makes the implementation of CompletionQueue#pluck available in other files * * This avoids having code that holds the GIL repeated at multiple sites. */ -grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag, - VALUE timeout); - -/* Initializes the CompletionQueue class. */ -void Init_grpc_completion_queue(); +grpc_event rb_completion_queue_pluck(grpc_completion_queue *queue, void *tag, + gpr_timespec deadline, void *reserved); #endif /* GRPC_RB_COMPLETION_QUEUE_H_ */ diff --git a/src/ruby/ext/grpc/rb_grpc.c b/src/ruby/ext/grpc/rb_grpc.c index 9246893f9f..188a62475d 100644 --- a/src/ruby/ext/grpc/rb_grpc.c +++ b/src/ruby/ext/grpc/rb_grpc.c @@ -46,7 +46,6 @@ #include "rb_call_credentials.h" #include "rb_channel.h" #include "rb_channel_credentials.h" -#include "rb_completion_queue.h" #include "rb_loader.h" #include "rb_server.h" #include "rb_server_credentials.h" @@ -85,7 +84,7 @@ VALUE grpc_rb_cannot_init(VALUE self) { VALUE grpc_rb_cannot_init_copy(VALUE copy, VALUE self) { (void)self; rb_raise(rb_eTypeError, - "initialization of %s only allowed from the gRPC native layer", + "Copy initialization of %s is not supported", rb_obj_classname(copy)); return Qnil; } @@ -318,7 +317,7 @@ void Init_grpc_c() { grpc_rb_mGrpcCore = rb_define_module_under(grpc_rb_mGRPC, "Core"); grpc_rb_sNewServerRpc = rb_struct_define("NewServerRpc", "method", "host", - "deadline", "metadata", "call", "cq", NULL); + "deadline", "metadata", "call", NULL); grpc_rb_sStatus = rb_struct_define("Status", "code", "details", "metadata", NULL); sym_code = ID2SYM(rb_intern("code")); @@ -326,7 +325,6 @@ void Init_grpc_c() { sym_metadata = ID2SYM(rb_intern("metadata")); Init_grpc_channel(); - Init_grpc_completion_queue(); Init_grpc_call(); Init_grpc_call_credentials(); Init_grpc_channel_credentials(); diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h index 13f961495c..6f0974e31b 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h @@ -470,7 +470,7 @@ extern grpc_byte_buffer_length_type grpc_byte_buffer_length_import; typedef void(*grpc_byte_buffer_destroy_type)(grpc_byte_buffer *byte_buffer); extern grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import; #define grpc_byte_buffer_destroy grpc_byte_buffer_destroy_import -typedef void(*grpc_byte_buffer_reader_init_type)(grpc_byte_buffer_reader *reader, grpc_byte_buffer *buffer); +typedef 
int(*grpc_byte_buffer_reader_init_type)(grpc_byte_buffer_reader *reader, grpc_byte_buffer *buffer); extern grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import; #define grpc_byte_buffer_reader_init grpc_byte_buffer_reader_init_import typedef void(*grpc_byte_buffer_reader_destroy_type)(grpc_byte_buffer_reader *reader); diff --git a/src/ruby/ext/grpc/rb_server.c b/src/ruby/ext/grpc/rb_server.c index f108b8acfc..bf26841fd2 100644 --- a/src/ruby/ext/grpc/rb_server.c +++ b/src/ruby/ext/grpc/rb_server.c @@ -38,6 +38,7 @@ #include <grpc/grpc.h> #include <grpc/grpc_security.h> +#include <grpc/support/log.h> #include "rb_call.h" #include "rb_channel_args.h" #include "rb_completion_queue.h" @@ -53,53 +54,51 @@ static ID id_at; /* id_insecure_server is used to indicate that a server is insecure */ static VALUE id_insecure_server; -/* grpc_rb_server wraps a grpc_server. It provides a peer ruby object, - 'mark' to minimize copying when a server is created from ruby. */ +/* grpc_rb_server wraps a grpc_server. */ typedef struct grpc_rb_server { - /* Holder of ruby objects involved in constructing the server */ - VALUE mark; /* The actual server */ grpc_server *wrapped; grpc_completion_queue *queue; } grpc_rb_server; +static void destroy_server(grpc_rb_server *server, gpr_timespec deadline) { + grpc_event ev; + if (server->wrapped != NULL) { + grpc_server_shutdown_and_notify(server->wrapped, server->queue, NULL); + ev = rb_completion_queue_pluck(server->queue, NULL, deadline, NULL); + if (ev.type == GRPC_QUEUE_TIMEOUT) { + grpc_server_cancel_all_calls(server->wrapped); + rb_completion_queue_pluck(server->queue, NULL, + gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + } + grpc_server_destroy(server->wrapped); + grpc_rb_completion_queue_destroy(server->queue); + server->wrapped = NULL; + server->queue = NULL; + } +} + /* Destroys server instances. */ static void grpc_rb_server_free(void *p) { grpc_rb_server *svr = NULL; + gpr_timespec deadline; if (p == NULL) { return; }; svr = (grpc_rb_server *)p; - /* Deletes the wrapped object if the mark object is Qnil, which indicates - that no other object is the actual owner. */ - /* grpc_server_shutdown does not exist. Change this to something that does - or delete it */ - if (svr->wrapped != NULL && svr->mark == Qnil) { - // grpc_server_shutdown(svr->wrapped); - // Aborting to indicate a bug - abort(); - grpc_server_destroy(svr->wrapped); - } + deadline = gpr_time_add( + gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_seconds(2, GPR_TIMESPAN)); - xfree(p); -} + destroy_server(svr, deadline); -/* Protects the mark object from GC */ -static void grpc_rb_server_mark(void *p) { - grpc_rb_server *server = NULL; - if (p == NULL) { - return; - } - server = (grpc_rb_server *)p; - if (server->mark != Qnil) { - rb_gc_mark(server->mark); - } + xfree(p); } static const rb_data_type_t grpc_rb_server_data_type = { "grpc_server", - {grpc_rb_server_mark, grpc_rb_server_free, GRPC_RB_MEMSIZE_UNAVAILABLE, + {GRPC_RB_GC_NOT_MARKED, grpc_rb_server_free, GRPC_RB_MEMSIZE_UNAVAILABLE, {NULL, NULL}}, NULL, NULL, @@ -116,23 +115,20 @@ static const rb_data_type_t grpc_rb_server_data_type = { static VALUE grpc_rb_server_alloc(VALUE cls) { grpc_rb_server *wrapper = ALLOC(grpc_rb_server); wrapper->wrapped = NULL; - wrapper->mark = Qnil; return TypedData_Wrap_Struct(cls, &grpc_rb_server_data_type, wrapper); } /* call-seq: - cq = CompletionQueue.new - server = Server.new(cq, {'arg1': 'value1'}) + server = Server.new({'arg1': 'value1'}) Initializes server instances. 
*/ -static VALUE grpc_rb_server_init(VALUE self, VALUE cqueue, VALUE channel_args) { - grpc_completion_queue *cq = NULL; +static VALUE grpc_rb_server_init(VALUE self, VALUE channel_args) { + grpc_completion_queue *cq = grpc_completion_queue_create(NULL); grpc_rb_server *wrapper = NULL; grpc_server *srv = NULL; grpc_channel_args args; MEMZERO(&args, grpc_channel_args, 1); - cq = grpc_rb_get_wrapped_completion_queue(cqueue); TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, wrapper); grpc_rb_hash_convert_to_channel_args(channel_args, &args); @@ -148,41 +144,9 @@ static VALUE grpc_rb_server_init(VALUE self, VALUE cqueue, VALUE channel_args) { wrapper->wrapped = srv; wrapper->queue = cq; - /* Add the cq as the server's mark object. This ensures the ruby cq can't be - GCed before the server */ - wrapper->mark = cqueue; return self; } -/* Clones Server instances. - - Gives Server a consistent implementation of Ruby's object copy/dup - protocol. */ -static VALUE grpc_rb_server_init_copy(VALUE copy, VALUE orig) { - grpc_rb_server *orig_srv = NULL; - grpc_rb_server *copy_srv = NULL; - - if (copy == orig) { - return copy; - } - - /* Raise an error if orig is not a server object or a subclass. */ - if (TYPE(orig) != T_DATA || - RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_server_free) { - rb_raise(rb_eTypeError, "not a %s", rb_obj_classname(grpc_rb_cServer)); - } - - TypedData_Get_Struct(orig, grpc_rb_server, &grpc_rb_server_data_type, - orig_srv); - TypedData_Get_Struct(copy, grpc_rb_server, &grpc_rb_server_data_type, - copy_srv); - - /* use ruby's MEMCPY to make a byte-for-byte copy of the server wrapper - object. */ - MEMCPY(copy_srv, orig_srv, grpc_rb_server, 1); - return copy; -} - /* request_call_stack holds various values used by the * grpc_rb_server_request_call function */ typedef struct request_call_stack { @@ -208,65 +172,57 @@ static void grpc_request_call_stack_cleanup(request_call_stack* st) { } /* call-seq: - cq = CompletionQueue.new - tag = Object.new - timeout = 10 - server.request_call(cqueue, tag, timeout) + server.request_call Requests notification of a new call on a server. 
*/ -static VALUE grpc_rb_server_request_call(VALUE self, VALUE cqueue, - VALUE tag_new, VALUE timeout) { +static VALUE grpc_rb_server_request_call(VALUE self) { grpc_rb_server *s = NULL; grpc_call *call = NULL; grpc_event ev; grpc_call_error err; request_call_stack st; VALUE result; + void *tag = (void*)&st; + grpc_completion_queue *call_queue = grpc_completion_queue_create(NULL); gpr_timespec deadline; TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s); if (s->wrapped == NULL) { rb_raise(rb_eRuntimeError, "destroyed!"); return Qnil; - } else { - grpc_request_call_stack_init(&st); - /* call grpc_server_request_call, then wait for it to complete using - * pluck_event */ - err = grpc_server_request_call( - s->wrapped, &call, &st.details, &st.md_ary, - grpc_rb_get_wrapped_completion_queue(cqueue), - grpc_rb_get_wrapped_completion_queue(s->mark), - ROBJECT(tag_new)); - if (err != GRPC_CALL_OK) { - grpc_request_call_stack_cleanup(&st); - rb_raise(grpc_rb_eCallError, - "grpc_server_request_call failed: %s (code=%d)", - grpc_call_error_detail_of(err), err); - return Qnil; - } - - ev = grpc_rb_completion_queue_pluck_event(s->mark, tag_new, timeout); - if (ev.type == GRPC_QUEUE_TIMEOUT) { - grpc_request_call_stack_cleanup(&st); - return Qnil; - } - if (!ev.success) { - grpc_request_call_stack_cleanup(&st); - rb_raise(grpc_rb_eCallError, "request_call completion failed"); - return Qnil; - } + } + grpc_request_call_stack_init(&st); + /* call grpc_server_request_call, then wait for it to complete using + * pluck_event */ + err = grpc_server_request_call( + s->wrapped, &call, &st.details, &st.md_ary, + call_queue, s->queue, tag); + if (err != GRPC_CALL_OK) { + grpc_request_call_stack_cleanup(&st); + rb_raise(grpc_rb_eCallError, + "grpc_server_request_call failed: %s (code=%d)", + grpc_call_error_detail_of(err), err); + return Qnil; + } - /* build the NewServerRpc struct result */ - deadline = gpr_convert_clock_type(st.details.deadline, GPR_CLOCK_REALTIME); - result = rb_struct_new( - grpc_rb_sNewServerRpc, rb_str_new2(st.details.method), - rb_str_new2(st.details.host), - rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec), - INT2NUM(deadline.tv_nsec)), - grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call), cqueue, NULL); + ev = rb_completion_queue_pluck(s->queue, tag, + gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + if (!ev.success) { grpc_request_call_stack_cleanup(&st); - return result; + rb_raise(grpc_rb_eCallError, "request_call completion failed"); + return Qnil; } - return Qnil; + + /* build the NewServerRpc struct result */ + deadline = gpr_convert_clock_type(st.details.deadline, GPR_CLOCK_REALTIME); + result = rb_struct_new( + grpc_rb_sNewServerRpc, rb_str_new2(st.details.method), + rb_str_new2(st.details.host), + rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec), + INT2NUM(deadline.tv_nsec)), + grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call, call_queue), + NULL); + grpc_request_call_stack_cleanup(&st); + return result; } static VALUE grpc_rb_server_start(VALUE self) { @@ -282,41 +238,33 @@ static VALUE grpc_rb_server_start(VALUE self) { /* call-seq: - cq = CompletionQueue.new - server = Server.new(cq, {'arg1': 'value1'}) + server = Server.new({'arg1': 'value1'}) ... // do stuff with server ... ... // to shutdown the server - server.destroy(cq) + server.destroy() ... // to shutdown the server with a timeout - server.destroy(cq, timeout) + server.destroy(timeout) Destroys server instances. 
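Taken together with the request_call and destroy changes below, the low-level server no longer needs a user-supplied CompletionQueue at any point. A rough sketch of the new GRPC::Core::Server lifecycle, assuming an insecure port; the address and the handling of the returned NewServerRpc struct are illustrative only.

    server = GRPC::Core::Server.new({})   # channel args only, no queue argument
    server.add_http2_port('0.0.0.0:50051', :this_port_is_insecure)
    server.start
    an_rpc = server.request_call          # no args; blocks until a call arrives
    # ... drive an_rpc.call with run_batch, then shut down ...
    server.destroy(10)                    # the timeout argument is optional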
*/ static VALUE grpc_rb_server_destroy(int argc, VALUE *argv, VALUE self) { - VALUE cqueue = Qnil; VALUE timeout = Qnil; - grpc_completion_queue *cq = NULL; - grpc_event ev; + gpr_timespec deadline; grpc_rb_server *s = NULL; - /* "11" == 1 mandatory args, 1 (timeout) is optional */ - rb_scan_args(argc, argv, "11", &cqueue, &timeout); - cq = grpc_rb_get_wrapped_completion_queue(cqueue); + /* "01" == 0 mandatory args, 1 (timeout) is optional */ + rb_scan_args(argc, argv, "01", &timeout); TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s); - - if (s->wrapped != NULL) { - grpc_server_shutdown_and_notify(s->wrapped, cq, NULL); - ev = grpc_rb_completion_queue_pluck_event(cqueue, Qnil, timeout); - if (!ev.success) { - rb_warn("server shutdown failed, cancelling the calls, objects may leak"); - grpc_server_cancel_all_calls(s->wrapped); - return Qfalse; - } - grpc_server_destroy(s->wrapped); - s->wrapped = NULL; + if (TYPE(timeout) == T_NIL) { + deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + } else { + deadline = grpc_rb_time_timeval(timeout, /* absolute time*/ 0); } - return Qtrue; + + destroy_server(s, deadline); + + return Qnil; } /* @@ -376,13 +324,13 @@ void Init_grpc_server() { rb_define_alloc_func(grpc_rb_cServer, grpc_rb_server_alloc); /* Provides a ruby constructor and support for dup/clone. */ - rb_define_method(grpc_rb_cServer, "initialize", grpc_rb_server_init, 2); + rb_define_method(grpc_rb_cServer, "initialize", grpc_rb_server_init, 1); rb_define_method(grpc_rb_cServer, "initialize_copy", - grpc_rb_server_init_copy, 1); + grpc_rb_cannot_init_copy, 1); /* Add the server methods. */ rb_define_method(grpc_rb_cServer, "request_call", - grpc_rb_server_request_call, 3); + grpc_rb_server_request_call, 0); rb_define_method(grpc_rb_cServer, "start", grpc_rb_server_start, 0); rb_define_method(grpc_rb_cServer, "destroy", grpc_rb_server_destroy, -1); rb_define_alias(grpc_rb_cServer, "close", "destroy"); diff --git a/src/ruby/ext/grpc/rb_server_credentials.c b/src/ruby/ext/grpc/rb_server_credentials.c index 3b0fb6c910..a44ce715ae 100644 --- a/src/ruby/ext/grpc/rb_server_credentials.c +++ b/src/ruby/ext/grpc/rb_server_credentials.c @@ -38,6 +38,7 @@ #include <grpc/grpc.h> #include <grpc/grpc_security.h> +#include <grpc/support/log.h> #include "rb_grpc.h" @@ -46,8 +47,8 @@ static VALUE grpc_rb_cServerCredentials = Qnil; /* grpc_rb_server_credentials wraps a grpc_server_credentials. It provides a - peer ruby object, 'mark' to minimize copying when a server credential is - created from ruby. */ + peer ruby object, 'mark' to hold references to objects involved in + constructing the server credentials. */ typedef struct grpc_rb_server_credentials { /* Holder of ruby objects involved in constructing the server credentials */ VALUE mark; @@ -111,36 +112,6 @@ static VALUE grpc_rb_server_credentials_alloc(VALUE cls) { wrapper); } -/* Clones ServerCredentials instances. - - Gives ServerCredentials a consistent implementation of Ruby's object copy/dup - protocol. */ -static VALUE grpc_rb_server_credentials_init_copy(VALUE copy, VALUE orig) { - grpc_rb_server_credentials *orig_ch = NULL; - grpc_rb_server_credentials *copy_ch = NULL; - - if (copy == orig) { - return copy; - } - - /* Raise an error if orig is not a server_credentials object or a subclass. 
*/ - if (TYPE(orig) != T_DATA || - RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_server_credentials_free) { - rb_raise(rb_eTypeError, "not a %s", - rb_obj_classname(grpc_rb_cServerCredentials)); - } - - TypedData_Get_Struct(orig, grpc_rb_server_credentials, - &grpc_rb_server_credentials_data_type, orig_ch); - TypedData_Get_Struct(copy, grpc_rb_server_credentials, - &grpc_rb_server_credentials_data_type, copy_ch); - - /* use ruby's MEMCPY to make a byte-for-byte copy of the server_credentials - wrapper object. */ - MEMCPY(copy_ch, orig_ch, grpc_rb_server_credentials, 1); - return copy; -} - /* The attribute used on the mark object to preserve the pem_root_certs. */ static ID id_pem_root_certs; @@ -270,7 +241,7 @@ void Init_grpc_server_credentials() { rb_define_method(grpc_rb_cServerCredentials, "initialize", grpc_rb_server_credentials_init, 3); rb_define_method(grpc_rb_cServerCredentials, "initialize_copy", - grpc_rb_server_credentials_init_copy, 1); + grpc_rb_cannot_init_copy, 1); id_pem_key_certs = rb_intern("__pem_key_certs"); id_pem_root_certs = rb_intern("__pem_root_certs"); diff --git a/src/ruby/lib/grpc/generic/active_call.rb b/src/ruby/lib/grpc/generic/active_call.rb index b03ddbc193..4260d85437 100644 --- a/src/ruby/lib/grpc/generic/active_call.rb +++ b/src/ruby/lib/grpc/generic/active_call.rb @@ -43,8 +43,7 @@ class Struct GRPC.logger.debug("Failing with status #{status}") # raise BadStatus, propagating the metadata if present. md = status.metadata - with_sym_keys = Hash[md.each_pair.collect { |x, y| [x.to_sym, y] }] - fail GRPC::BadStatus.new(status.code, status.details, with_sym_keys) + fail GRPC::BadStatus.new(status.code, status.details, md) end status end @@ -61,7 +60,7 @@ module GRPC extend Forwardable attr_reader(:deadline) def_delegators :@call, :cancel, :metadata, :write_flag, :write_flag=, - :peer, :peer_cert + :peer, :peer_cert, :trailing_metadata # client_invoke begins a client invocation. # @@ -75,17 +74,10 @@ module GRPC # if a keyword value is a list, multiple metadata for it's key are sent # # @param call [Call] a call on which to start and invocation - # @param q [CompletionQueue] the completion queue # @param metadata [Hash] the metadata - def self.client_invoke(call, q, metadata = {}) + def self.client_invoke(call, metadata = {}) fail(TypeError, '!Core::Call') unless call.is_a? Core::Call - unless q.is_a? Core::CompletionQueue - fail(TypeError, '!Core::CompletionQueue') - end - metadata_tag = Object.new - call.run_batch(q, metadata_tag, INFINITE_FUTURE, - SEND_INITIAL_METADATA => metadata) - metadata_tag + call.run_batch(SEND_INITIAL_METADATA => metadata) end # Creates an ActiveCall. @@ -102,26 +94,21 @@ module GRPC # deadline is the absolute deadline for the call. # # @param call [Call] the call used by the ActiveCall - # @param q [CompletionQueue] the completion queue used to accept - # the call. This queue will be closed on call completion. # @param marshal [Function] f(obj)->string that marshal requests # @param unmarshal [Function] f(string)->obj that unmarshals responses # @param deadline [Fixnum] the deadline for the call to complete - # @param metadata_tag [Object] the object use obtain metadata for clients - # @param started [true|false] indicates if the call has begun - def initialize(call, q, marshal, unmarshal, deadline, started: true, - metadata_tag: nil) + # @param started [true|false] indicates that metadata was sent + # @param metadata_received [true|false] indicates if metadata has already + # been received. 
Should always be true for server calls + def initialize(call, marshal, unmarshal, deadline, started: true, + metadata_received: false) fail(TypeError, '!Core::Call') unless call.is_a? Core::Call - unless q.is_a? Core::CompletionQueue - fail(TypeError, '!Core::CompletionQueue') - end @call = call - @cq = q @deadline = deadline @marshal = marshal - @started = started @unmarshal = unmarshal - @metadata_tag = metadata_tag + @metadata_received = metadata_received + @metadata_sent = started @op_notifier = nil end @@ -132,7 +119,7 @@ module GRPC end # cancelled indicates if the call was cancelled - def cancelled + def cancelled? !@call.status.nil? && @call.status.code == Core::StatusCodes::CANCELLED end @@ -168,8 +155,11 @@ module GRPC SEND_CLOSE_FROM_CLIENT => nil } ops[RECV_STATUS_ON_CLIENT] = nil if assert_finished - batch_result = @call.run_batch(@cq, self, INFINITE_FUTURE, ops) + batch_result = @call.run_batch(ops) return unless assert_finished + unless batch_result.status.nil? + @call.trailing_metadata = batch_result.status.metadata + end @call.status = batch_result.status op_is_done batch_result.check_status @@ -179,20 +169,14 @@ module GRPC # # It blocks until the remote endpoint acknowledges by sending a status. def finished - batch_result = @call.run_batch(@cq, self, INFINITE_FUTURE, - RECV_STATUS_ON_CLIENT => nil) + batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil) unless batch_result.status.nil? - if @call.metadata.nil? - @call.metadata = batch_result.status.metadata - else - @call.metadata.merge!(batch_result.status.metadata) - end + @call.trailing_metadata = batch_result.status.metadata end @call.status = batch_result.status op_is_done batch_result.check_status @call.close - @cq.close end # remote_send sends a request to the remote endpoint. @@ -203,9 +187,10 @@ module GRPC # @param marshalled [false, true] indicates if the object is already # marshalled. def remote_send(req, marshalled = false) + # TODO(murgatroid99): ensure metadata was sent GRPC.logger.debug("sending #{req}, marshalled? #{marshalled}") payload = marshalled ? req : @marshal.call(req) - @call.run_batch(@cq, self, INFINITE_FUTURE, SEND_MESSAGE => payload) + @call.run_batch(SEND_MESSAGE => payload) end # send_status sends a status to the remote endpoint. @@ -222,7 +207,7 @@ module GRPC SEND_STATUS_FROM_SERVER => Struct::Status.new(code, details, metadata) } ops[RECV_CLOSE_ON_SERVER] = nil if assert_finished - @call.run_batch(@cq, self, INFINITE_FUTURE, ops) + @call.run_batch(ops) nil end @@ -234,11 +219,11 @@ module GRPC # raising BadStatus def remote_read ops = { RECV_MESSAGE => nil } - ops[RECV_INITIAL_METADATA] = nil unless @metadata_tag.nil? - batch_result = @call.run_batch(@cq, self, INFINITE_FUTURE, ops) - unless @metadata_tag.nil? + ops[RECV_INITIAL_METADATA] = nil unless @metadata_received + batch_result = @call.run_batch(ops) + unless @metadata_received @call.metadata = batch_result.metadata - @metadata_tag = nil + @metadata_received = true end GRPC.logger.debug("received req: #{batch_result}") unless batch_result.nil? || batch_result.message.nil? 
@@ -318,7 +303,7 @@ module GRPC # a list, multiple metadata for its key are sent # @return [Object] the response received from the server def request_response(req, metadata: {}) - start_call(metadata) unless @started + start_call(metadata) remote_send(req) writes_done(false) response = remote_read @@ -342,7 +327,7 @@ module GRPC # a list, multiple metadata for its key are sent # @return [Object] the response received from the server def client_streamer(requests, metadata: {}) - start_call(metadata) unless @started + start_call(metadata) requests.each { |r| remote_send(r) } writes_done(false) response = remote_read @@ -368,7 +353,7 @@ module GRPC # a list, multiple metadata for its key are sent # @return [Enumerator|nil] a response Enumerator def server_streamer(req, metadata: {}) - start_call(metadata) unless @started + start_call(metadata) remote_send(req) writes_done(false) replies = enum_for(:each_remote_read_then_finish) @@ -407,10 +392,9 @@ module GRPC # a list, multiple metadata for its key are sent # @return [Enumerator, nil] a response Enumerator def bidi_streamer(requests, metadata: {}, &blk) - start_call(metadata) unless @started - bd = BidiCall.new(@call, @cq, @marshal, @unmarshal, - metadata_tag: @metadata_tag) - @metadata_tag = nil # run_on_client ensures metadata is read + start_call(metadata) + bd = BidiCall.new(@call, @marshal, @unmarshal, + metadata_received: @metadata_received) bd.run_on_client(requests, @op_notifier, &blk) end @@ -426,7 +410,8 @@ module GRPC # # @param gen_each_reply [Proc] generates the BiDi stream replies def run_server_bidi(gen_each_reply) - bd = BidiCall.new(@call, @cq, @marshal, @unmarshal) + bd = BidiCall.new(@call, @marshal, @unmarshal, + metadata_received: @metadata_received) bd.run_on_server(gen_each_reply) end @@ -449,9 +434,9 @@ module GRPC # @param metadata [Hash] metadata to be sent to the server. If a value is # a list, multiple metadata for its key are sent def start_call(metadata = {}) - return if @started - @metadata_tag = ActiveCall.client_invoke(@call, @cq, metadata) - @started = true + return if @metadata_sent + @metadata_tag = ActiveCall.client_invoke(@call, metadata) + @metadata_sent = true end def self.view_class(*visible_methods) @@ -468,18 +453,18 @@ module GRPC # SingleReqView limits access to an ActiveCall's methods for use in server # handlers that receive just one request. - SingleReqView = view_class(:cancelled, :deadline, :metadata, + SingleReqView = view_class(:cancelled?, :deadline, :metadata, :output_metadata, :peer, :peer_cert) # MultiReqView limits access to an ActiveCall's methods for use in # server client_streamer handlers. - MultiReqView = view_class(:cancelled, :deadline, :each_queued_msg, + MultiReqView = view_class(:cancelled?, :deadline, :each_queued_msg, :each_remote_read, :metadata, :output_metadata) # Operation limits access to an ActiveCall's methods for use as # a Operation on the client. - Operation = view_class(:cancel, :cancelled, :deadline, :execute, + Operation = view_class(:cancel, :cancelled?, :deadline, :execute, :metadata, :status, :start_call, :wait, :write_flag, - :write_flag=) + :write_flag=, :trailing_metadata) end end diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb index 238f409a1d..425dc3e519 100644 --- a/src/ruby/lib/grpc/generic/bidi_call.rb +++ b/src/ruby/lib/grpc/generic/bidi_call.rb @@ -52,23 +52,18 @@ module GRPC # deadline is the absolute deadline for the call. 
# # @param call [Call] the call used by the ActiveCall - # @param q [CompletionQueue] the completion queue used to accept - # the call # @param marshal [Function] f(obj)->string that marshal requests # @param unmarshal [Function] f(string)->obj that unmarshals responses - # @param metadata_tag [Object] tag object used to collect metadata - def initialize(call, q, marshal, unmarshal, metadata_tag: nil) + # @param metadata_received [true|false] indicates if metadata has already + # been received. Should always be true for server calls + def initialize(call, marshal, unmarshal, metadata_received: false) fail(ArgumentError, 'not a call') unless call.is_a? Core::Call - unless q.is_a? Core::CompletionQueue - fail(ArgumentError, 'not a CompletionQueue') - end @call = call - @cq = q @marshal = marshal @op_notifier = nil # signals completion on clients @readq = Queue.new @unmarshal = unmarshal - @metadata_tag = metadata_tag + @metadata_received = metadata_received @reads_complete = false @writes_complete = false @complete = false @@ -124,7 +119,6 @@ module GRPC @done_mutex.synchronize do return unless @reads_complete && @writes_complete && !@complete @call.close - @cq.close @complete = true end end @@ -132,11 +126,11 @@ module GRPC # performs a read using @call.run_batch, ensures metadata is set up def read_using_run_batch ops = { RECV_MESSAGE => nil } - ops[RECV_INITIAL_METADATA] = nil unless @metadata_tag.nil? - batch_result = @call.run_batch(@cq, self, INFINITE_FUTURE, ops) - unless @metadata_tag.nil? + ops[RECV_INITIAL_METADATA] = nil unless @metadata_received + batch_result = @call.run_batch(ops) + unless @metadata_received @call.metadata = batch_result.metadata - @metadata_tag = nil + @metadata_received = true end batch_result end @@ -161,20 +155,26 @@ module GRPC def write_loop(requests, is_client: true) GRPC.logger.debug('bidi-write-loop: starting') - write_tag = Object.new count = 0 requests.each do |req| GRPC.logger.debug("bidi-write-loop: #{count}") count += 1 payload = @marshal.call(req) - @call.run_batch(@cq, write_tag, INFINITE_FUTURE, - SEND_MESSAGE => payload) + # Fails if status already received + begin + @call.run_batch(SEND_MESSAGE => payload) + rescue GRPC::Core::CallError => e + # This is almost definitely caused by a status arriving while still + # writing. 
Don't re-throw the error + GRPC.logger.warn('bidi-write-loop: ended with error') + GRPC.logger.warn(e) + break + end end GRPC.logger.debug("bidi-write-loop: #{count} writes done") if is_client GRPC.logger.debug("bidi-write-loop: client sent #{count}, waiting") - @call.run_batch(@cq, write_tag, INFINITE_FUTURE, - SEND_CLOSE_FROM_CLIENT => nil) + @call.run_batch(SEND_CLOSE_FROM_CLIENT => nil) GRPC.logger.debug('bidi-write-loop: done') notify_done @writes_complete = true @@ -195,7 +195,6 @@ module GRPC Thread.new do GRPC.logger.debug('bidi-read-loop: starting') begin - read_tag = Object.new count = 0 # queue the initial read before beginning the loop loop do @@ -208,8 +207,7 @@ module GRPC GRPC.logger.debug("bidi-read-loop: null batch #{batch_result}") if is_client - batch_result = @call.run_batch(@cq, read_tag, INFINITE_FUTURE, - RECV_STATUS_ON_CLIENT => nil) + batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil) @call.status = batch_result.status batch_result.check_status GRPC.logger.debug("bidi-read-loop: done status #{@call.status}") diff --git a/src/ruby/lib/grpc/generic/client_stub.rb b/src/ruby/lib/grpc/generic/client_stub.rb index cddca13d17..9d6bd3bf59 100644 --- a/src/ruby/lib/grpc/generic/client_stub.rb +++ b/src/ruby/lib/grpc/generic/client_stub.rb @@ -90,19 +90,16 @@ module GRPC # when present, this is the default timeout used for calls # # @param host [String] the host the stub connects to - # @param q [Core::CompletionQueue] used to wait for events - now deprecated - # since each new active call gets its own separately # @param creds [Core::ChannelCredentials|Symbol] the channel credentials, or # :this_channel_is_insecure # @param channel_override [Core::Channel] a pre-created channel # @param timeout [Number] the default timeout to use in requests # @param channel_args [Hash] the channel arguments - def initialize(host, q, creds, + def initialize(host, creds, channel_override: nil, timeout: nil, propagate_mask: nil, channel_args: {}) - fail(TypeError, '!CompletionQueue') unless q.is_a?(Core::CompletionQueue) @ch = ClientStub.setup_channel(channel_override, host, creds, channel_args) alt_host = channel_args[Core::Channel::SSL_TARGET] @@ -441,15 +438,13 @@ module GRPC deadline = from_relative_time(@timeout) if deadline.nil? # Provide each new client call with its own completion queue - call_queue = Core::CompletionQueue.new - call = @ch.create_call(call_queue, - parent, # parent call + call = @ch.create_call(parent, # parent call @propagate_mask, # propagation options method, nil, # host use nil, deadline) call.set_credentials! credentials unless credentials.nil? - ActiveCall.new(call, call_queue, marshal, unmarshal, deadline, + ActiveCall.new(call, marshal, unmarshal, deadline, started: false) end end diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb index ab7333d133..c92a532a50 100644 --- a/src/ruby/lib/grpc/generic/rpc_server.rb +++ b/src/ruby/lib/grpc/generic/rpc_server.rb @@ -159,16 +159,6 @@ module GRPC # Signal check period is 0.25s SIGNAL_CHECK_PERIOD = 0.25 - # setup_cq is used by #initialize to constuct a Core::CompletionQueue from - # its arguments. - def self.setup_cq(alt_cq) - return Core::CompletionQueue.new if alt_cq.nil? - unless alt_cq.is_a? Core::CompletionQueue - fail(TypeError, '!CompletionQueue') - end - alt_cq - end - # setup_connect_md_proc is used by #initialize to validate the # connect_md_proc. 
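The removal of CompletionQueue propagates to the public Ruby API as well: ClientStub.new now takes just the host and channel credentials, and RpcServer.new no longer accepts completion_queue_override. A sketch of typical usage after this change, where EchoService stands in for any generated service implementation and the addresses are placeholders:

    # client side: the stub no longer constructs or receives a CompletionQueue
    stub = EchoService::Stub.new('localhost:50051', :this_channel_is_insecure)

    # server side: no completion_queue_override keyword any more
    server = GRPC::RpcServer.new(pool_size: 30, poll_period: 1)
    server.add_http2_port('0.0.0.0:50051', :this_port_is_insecure)
    server.handle(EchoService.new)
    server.run_till_terminated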
def self.setup_connect_md_proc(a_proc) @@ -191,10 +181,6 @@ module GRPC # * pool_size: the size of the thread pool the server uses to run its # threads # - # * completion_queue_override: when supplied, this will be used as the - # completion_queue that the server uses to receive network events, - # otherwise its creates a new instance itself - # # * creds: [GRPC::Core::ServerCredentials] # the credentials used to secure the server # @@ -212,11 +198,9 @@ module GRPC def initialize(pool_size:DEFAULT_POOL_SIZE, max_waiting_requests:DEFAULT_MAX_WAITING_REQUESTS, poll_period:DEFAULT_POLL_PERIOD, - completion_queue_override:nil, connect_md_proc:nil, server_args:{}) @connect_md_proc = RpcServer.setup_connect_md_proc(connect_md_proc) - @cq = RpcServer.setup_cq(completion_queue_override) @max_waiting_requests = max_waiting_requests @poll_period = poll_period @pool_size = pool_size @@ -226,7 +210,7 @@ module GRPC # running_state can take 4 values: :not_started, :running, :stopping, and # :stopped. State transitions can only proceed in that order. @running_state = :not_started - @server = Core::Server.new(@cq, server_args) + @server = Core::Server.new(server_args) end # stops a running server @@ -240,7 +224,7 @@ module GRPC transition_running_state(:stopping) end deadline = from_relative_time(@poll_period) - @server.close(@cq, deadline) + @server.close(deadline) @pool.stop end @@ -355,7 +339,8 @@ module GRPC return an_rpc if @pool.jobs_waiting <= @max_waiting_requests GRPC.logger.warn("NOT AVAILABLE: too many jobs_waiting: #{an_rpc}") noop = proc { |x| x } - c = ActiveCall.new(an_rpc.call, an_rpc.cq, noop, noop, an_rpc.deadline) + c = ActiveCall.new(an_rpc.call, noop, noop, an_rpc.deadline, + metadata_received: true) c.send_status(GRPC::Core::StatusCodes::RESOURCE_EXHAUSTED, '') nil end @@ -366,7 +351,8 @@ module GRPC return an_rpc if rpc_descs.key?(mth) GRPC.logger.warn("UNIMPLEMENTED: #{an_rpc}") noop = proc { |x| x } - c = ActiveCall.new(an_rpc.call, an_rpc.cq, noop, noop, an_rpc.deadline) + c = ActiveCall.new(an_rpc.call, noop, noop, an_rpc.deadline, + metadata_received: true) c.send_status(GRPC::Core::StatusCodes::UNIMPLEMENTED, '') nil end @@ -374,11 +360,9 @@ module GRPC # handles calls to the server def loop_handle_server_calls fail 'not started' if running_state == :not_started - loop_tag = Object.new while running_state == :running begin - comp_queue = Core::CompletionQueue.new - an_rpc = @server.request_call(comp_queue, loop_tag, INFINITE_FUTURE) + an_rpc = @server.request_call break if (!an_rpc.nil?) && an_rpc.call.nil? active_call = new_active_server_call(an_rpc) unless active_call.nil? @@ -410,15 +394,13 @@ module GRPC return nil if an_rpc.nil? || an_rpc.call.nil? # allow the metadata to be accessed from the call - handle_call_tag = Object.new an_rpc.call.metadata = an_rpc.metadata # attaches md to call for handlers GRPC.logger.debug("call md is #{an_rpc.metadata}") connect_md = nil unless @connect_md_proc.nil? 
connect_md = @connect_md_proc.call(an_rpc.method, an_rpc.metadata) end - an_rpc.call.run_batch(an_rpc.cq, handle_call_tag, INFINITE_FUTURE, - SEND_INITIAL_METADATA => connect_md) + an_rpc.call.run_batch(SEND_INITIAL_METADATA => connect_md) return nil unless available?(an_rpc) return nil unless implemented?(an_rpc) @@ -426,9 +408,9 @@ module GRPC # Create the ActiveCall GRPC.logger.info("deadline is #{an_rpc.deadline}; (now=#{Time.now})") rpc_desc = rpc_descs[an_rpc.method.to_sym] - c = ActiveCall.new(an_rpc.call, an_rpc.cq, - rpc_desc.marshal_proc, rpc_desc.unmarshal_proc(:input), - an_rpc.deadline) + c = ActiveCall.new(an_rpc.call, rpc_desc.marshal_proc, + rpc_desc.unmarshal_proc(:input), an_rpc.deadline, + metadata_received: true) mth = an_rpc.method.to_sym [c, mth] end diff --git a/src/ruby/lib/grpc/generic/service.rb b/src/ruby/lib/grpc/generic/service.rb index f30242ee80..7cb9f1cc99 100644 --- a/src/ruby/lib/grpc/generic/service.rb +++ b/src/ruby/lib/grpc/generic/service.rb @@ -168,7 +168,7 @@ module GRPC # @param kw [KeywordArgs] the channel arguments, plus any optional # args for configuring the client's channel def initialize(host, creds, **kw) - super(host, Core::CompletionQueue.new, creds, **kw) + super(host, creds, **kw) end # Used define_method to add a method for each rpc_desc. Each method diff --git a/src/ruby/lib/grpc/version.rb b/src/ruby/lib/grpc/version.rb index 5e6aaef2eb..6e62af94d4 100644 --- a/src/ruby/lib/grpc/version.rb +++ b/src/ruby/lib/grpc/version.rb @@ -29,5 +29,5 @@ # GRPC contains the General RPC module. module GRPC - VERSION = '0.16.0.dev' + VERSION = '1.1.0.dev' end diff --git a/src/ruby/pb/test/client.rb b/src/ruby/pb/test/client.rb index b6695482a2..066a7bb90f 100755 --- a/src/ruby/pb/test/client.rb +++ b/src/ruby/pb/test/client.rb @@ -197,6 +197,25 @@ class PingPongPlayer end end +class BlockingEnumerator + include Grpc::Testing + include Grpc::Testing::PayloadType + + def initialize(req_size, sleep_time) + @req_size = req_size + @sleep_time = sleep_time + end + + def each_item + return enum_for(:each_item) unless block_given? + req_cls = StreamingOutputCallRequest + req = req_cls.new(payload: Payload.new(body: nulls(@req_size))) + yield req + # Sleep until after the deadline should have passed + sleep(@sleep_time) + end +end + # defines methods corresponding to each interop test case. class NamedTests include Grpc::Testing @@ -315,11 +334,10 @@ class NamedTests end def timeout_on_sleeping_server - msg_sizes = [[27_182, 31_415]] - ppp = PingPongPlayer.new(msg_sizes) - deadline = GRPC::Core::TimeConsts::from_relative_time(0.001) - resps = @stub.full_duplex_call(ppp.each_item, deadline: deadline) - resps.each { |r| ppp.queue.push(r) } + enum = BlockingEnumerator.new(27_182, 2) + deadline = GRPC::Core::TimeConsts::from_relative_time(1) + resps = @stub.full_duplex_call(enum.each_item, deadline: deadline) + resps.each { } # wait to receive each request (or timeout) fail 'Should have raised GRPC::BadStatus(DEADLINE_EXCEEDED)' rescue GRPC::BadStatus => e assert("#{__callee__}: status was wrong") do @@ -351,7 +369,7 @@ class NamedTests op.execute fail 'Should have raised GRPC:Cancelled' rescue GRPC::Cancelled - assert("#{__callee__}: call operation should be CANCELLED") { op.cancelled } + assert("#{__callee__}: call operation should be CANCELLED") { op.cancelled? 
} end def cancel_after_first_response @@ -362,7 +380,7 @@ class NamedTests op.execute.each { |r| ppp.queue.push(r) } fail 'Should have raised GRPC:Cancelled' rescue GRPC::Cancelled - assert("#{__callee__}: call operation should be CANCELLED") { op.cancelled } + assert("#{__callee__}: call operation should be CANCELLED") { op.cancelled? } op.wait end diff --git a/src/ruby/pb/test/server.rb b/src/ruby/pb/test/server.rb index 914c7cc79d..088f281dc4 100755 --- a/src/ruby/pb/test/server.rb +++ b/src/ruby/pb/test/server.rb @@ -188,11 +188,13 @@ class TestTarget < Grpc::Testing::TestService::Service begin GRPC.logger.info('interop-server: started receiving') reqs.each do |req| - resp_size = req.response_parameters[0].size - GRPC.logger.info("read a req, response size is #{resp_size}") - resp = cls.new(payload: Payload.new(type: req.response_type, - body: nulls(resp_size))) - q.push(resp) + req.response_parameters.each do |params| + resp_size = params.size + GRPC.logger.info("read a req, response size is #{resp_size}") + resp = cls.new(payload: Payload.new(type: req.response_type, + body: nulls(resp_size))) + q.push(resp) + end end GRPC.logger.info('interop-server: finished receiving') q.push(self) diff --git a/src/ruby/spec/call_spec.rb b/src/ruby/spec/call_spec.rb index ae3ce0748a..1c44b333de 100644 --- a/src/ruby/spec/call_spec.rb +++ b/src/ruby/spec/call_spec.rb @@ -96,7 +96,6 @@ describe GRPC::Core::CallOps do end describe GRPC::Core::Call do - let(:client_queue) { GRPC::Core::CompletionQueue.new } let(:test_tag) { Object.new } let(:fake_host) { 'localhost:10101' } @@ -154,7 +153,7 @@ describe GRPC::Core::Call do end def make_test_call - @ch.create_call(client_queue, nil, nil, 'dummy_method', nil, deadline) + @ch.create_call(nil, nil, 'dummy_method', nil, deadline) end def deadline diff --git a/src/ruby/spec/channel_spec.rb b/src/ruby/spec/channel_spec.rb index 355f95c9d7..740eac631a 100644 --- a/src/ruby/spec/channel_spec.rb +++ b/src/ruby/spec/channel_spec.rb @@ -37,7 +37,6 @@ end describe GRPC::Core::Channel do let(:fake_host) { 'localhost:0' } - let(:cq) { GRPC::Core::CompletionQueue.new } def create_test_cert GRPC::Core::ChannelCredentials.new(load_test_certs[0]) @@ -122,7 +121,7 @@ describe GRPC::Core::Channel do deadline = Time.now + 5 blk = proc do - ch.create_call(cq, nil, nil, 'dummy_method', nil, deadline) + ch.create_call(nil, nil, 'dummy_method', nil, deadline) end expect(&blk).to_not raise_error end @@ -133,7 +132,7 @@ describe GRPC::Core::Channel do deadline = Time.now + 5 blk = proc do - ch.create_call(cq, nil, nil, 'dummy_method', nil, deadline) + ch.create_call(nil, nil, 'dummy_method', nil, deadline) end expect(&blk).to raise_error(RuntimeError) end diff --git a/src/ruby/spec/client_server_spec.rb b/src/ruby/spec/client_server_spec.rb index d60d84996f..d9df0b9ae2 100644 --- a/src/ruby/spec/client_server_spec.rb +++ b/src/ruby/spec/client_server_spec.rb @@ -34,27 +34,23 @@ include GRPC::Core shared_context 'setup: tags' do let(:sent_message) { 'sent message' } let(:reply_text) { 'the reply' } - before(:example) do - @client_tag = Object.new - @server_tag = Object.new - end def deadline Time.now + 5 end def server_allows_client_to_proceed(metadata = {}) - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + recvd_rpc = @server.request_call expect(recvd_rpc).to_not eq nil server_call = recvd_rpc.call ops = { CallOps::SEND_INITIAL_METADATA => metadata } - svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline, ops) + svr_batch = 
server_call.run_batch(ops) expect(svr_batch.send_metadata).to be true server_call end def new_client_call - @ch.create_call(@client_queue, nil, nil, '/method', nil, deadline) + @ch.create_call(nil, nil, '/method', nil, deadline) end end @@ -91,8 +87,7 @@ shared_examples 'basic GRPC message delivery is OK' do CallOps::SEND_INITIAL_METADATA => {}, CallOps::SEND_MESSAGE => sent_message } - batch_result = call.run_batch(@client_queue, @client_tag, deadline, - client_ops) + batch_result = call.run_batch(client_ops) expect(batch_result.send_metadata).to be true expect(batch_result.send_message).to be true @@ -101,8 +96,7 @@ shared_examples 'basic GRPC message delivery is OK' do server_ops = { CallOps::RECV_MESSAGE => nil } - svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline, - server_ops) + svr_batch = server_call.run_batch(server_ops) expect(svr_batch.message).to eq(sent_message) end @@ -118,8 +112,7 @@ shared_examples 'basic GRPC message delivery is OK' do CallOps::SEND_INITIAL_METADATA => {}, CallOps::SEND_MESSAGE => sent_message } - batch_result = call.run_batch(@client_queue, @client_tag, deadline, - client_ops) + batch_result = call.run_batch(client_ops) expect(batch_result.send_metadata).to be true expect(batch_result.send_message).to be true @@ -129,8 +122,7 @@ shared_examples 'basic GRPC message delivery is OK' do CallOps::RECV_MESSAGE => nil, CallOps::SEND_MESSAGE => reply_text } - svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline, - server_ops) + svr_batch = server_call.run_batch(server_ops) expect(svr_batch.message).to eq(sent_message) expect(svr_batch.send_message).to be true end @@ -150,8 +142,7 @@ shared_examples 'basic GRPC message delivery is OK' do CallOps::SEND_INITIAL_METADATA => md, CallOps::SEND_MESSAGE => long_request_str } - batch_result = call.run_batch(@client_queue, @client_tag, deadline, - client_ops) + batch_result = call.run_batch(client_ops) expect(batch_result.send_metadata).to be true expect(batch_result.send_message).to be true @@ -161,8 +152,7 @@ shared_examples 'basic GRPC message delivery is OK' do CallOps::RECV_MESSAGE => nil, CallOps::SEND_MESSAGE => long_response_str } - svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline, - server_ops) + svr_batch = server_call.run_batch(server_ops) expect(svr_batch.message).to eq(long_request_str) expect(svr_batch.send_message).to be true @@ -171,8 +161,7 @@ shared_examples 'basic GRPC message delivery is OK' do CallOps::RECV_INITIAL_METADATA => nil, CallOps::RECV_MESSAGE => nil } - batch_result = call.run_batch(@client_queue, @client_tag, deadline, - client_ops) + batch_result = call.run_batch(client_ops) expect(batch_result.send_close).to be true expect(batch_result.message).to eq long_response_str end @@ -189,8 +178,7 @@ shared_examples 'basic GRPC message delivery is OK' do CallOps::SEND_INITIAL_METADATA => {}, CallOps::SEND_MESSAGE => sent_message } - batch_result = call.run_batch(@client_queue, @client_tag, deadline, - client_ops) + batch_result = call.run_batch(client_ops) expect(batch_result.send_metadata).to be true expect(batch_result.send_message).to be true @@ -200,8 +188,7 @@ shared_examples 'basic GRPC message delivery is OK' do server_ops = { CallOps::SEND_STATUS_FROM_SERVER => the_status } - svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline, - server_ops) + svr_batch = server_call.run_batch(server_ops) expect(svr_batch.message).to eq nil expect(svr_batch.send_status).to be true end @@ -218,8 +205,7 @@ shared_examples 'basic 
GRPC message delivery is OK' do CallOps::SEND_INITIAL_METADATA => {}, CallOps::SEND_MESSAGE => sent_message } - batch_result = call.run_batch(@client_queue, @client_tag, deadline, - client_ops) + batch_result = call.run_batch(client_ops) expect(batch_result.send_metadata).to be true expect(batch_result.send_message).to be true @@ -231,8 +217,7 @@ shared_examples 'basic GRPC message delivery is OK' do CallOps::SEND_MESSAGE => reply_text, CallOps::SEND_STATUS_FROM_SERVER => the_status } - svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline, - server_ops) + svr_batch = server_call.run_batch(server_ops) expect(svr_batch.message).to eq sent_message expect(svr_batch.send_status).to be true expect(svr_batch.send_message).to be true @@ -244,8 +229,7 @@ shared_examples 'basic GRPC message delivery is OK' do CallOps::RECV_MESSAGE => nil, CallOps::RECV_STATUS_ON_CLIENT => nil } - batch_result = call.run_batch(@client_queue, @client_tag, deadline, - client_ops) + batch_result = call.run_batch(client_ops) expect(batch_result.send_close).to be true expect(batch_result.message).to eq reply_text expect(batch_result.status).to eq the_status @@ -254,8 +238,7 @@ shared_examples 'basic GRPC message delivery is OK' do server_ops = { CallOps::RECV_CLOSE_ON_SERVER => nil } - svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline, - server_ops) + svr_batch = server_call.run_batch(server_ops) expect(svr_batch.send_close).to be true end end @@ -286,8 +269,7 @@ shared_examples 'GRPC metadata delivery works OK' do CallOps::SEND_INITIAL_METADATA => md } blk = proc do - call.run_batch(@client_queue, @client_tag, deadline, - client_ops) + call.run_batch(client_ops) end expect(&blk).to raise_error end @@ -297,15 +279,14 @@ shared_examples 'GRPC metadata delivery works OK' do @valid_metadata.each do |md| recvd_rpc = nil rcv_thread = Thread.new do - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + recvd_rpc = @server.request_call end call = new_client_call client_ops = { CallOps::SEND_INITIAL_METADATA => md } - batch_result = call.run_batch(@client_queue, @client_tag, deadline, - client_ops) + batch_result = call.run_batch(client_ops) expect(batch_result.send_metadata).to be true # confirm the server can receive the client metadata @@ -338,7 +319,7 @@ shared_examples 'GRPC metadata delivery works OK' do @bad_keys.each do |md| recvd_rpc = nil rcv_thread = Thread.new do - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + recvd_rpc = @server.request_call end call = new_client_call @@ -347,7 +328,7 @@ shared_examples 'GRPC metadata delivery works OK' do client_ops = { CallOps::SEND_INITIAL_METADATA => nil } - call.run_batch(@client_queue, @client_tag, deadline, client_ops) + call.run_batch(client_ops) # server gets the invocation rcv_thread.join @@ -356,8 +337,7 @@ shared_examples 'GRPC metadata delivery works OK' do CallOps::SEND_INITIAL_METADATA => md } blk = proc do - recvd_rpc.call.run_batch(@server_queue, @server_tag, deadline, - server_ops) + recvd_rpc.call.run_batch(server_ops) end expect(&blk).to raise_error end @@ -366,7 +346,7 @@ shared_examples 'GRPC metadata delivery works OK' do it 'sends an empty hash if no metadata is added' do recvd_rpc = nil rcv_thread = Thread.new do - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + recvd_rpc = @server.request_call end call = new_client_call @@ -375,7 +355,7 @@ shared_examples 'GRPC metadata delivery works OK' do client_ops = { CallOps::SEND_INITIAL_METADATA => nil } - 
call.run_batch(@client_queue, @client_tag, deadline, client_ops) + call.run_batch(client_ops) # server gets the invocation but sends no metadata back rcv_thread.join @@ -384,14 +364,13 @@ shared_examples 'GRPC metadata delivery works OK' do server_ops = { CallOps::SEND_INITIAL_METADATA => nil } - server_call.run_batch(@server_queue, @server_tag, deadline, server_ops) + server_call.run_batch(server_ops) # client receives nothing as expected client_ops = { CallOps::RECV_INITIAL_METADATA => nil } - batch_result = call.run_batch(@client_queue, @client_tag, deadline, - client_ops) + batch_result = call.run_batch(client_ops) expect(batch_result.metadata).to eq({}) end @@ -399,7 +378,7 @@ shared_examples 'GRPC metadata delivery works OK' do @valid_metadata.each do |md| recvd_rpc = nil rcv_thread = Thread.new do - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + recvd_rpc = @server.request_call end call = new_client_call @@ -408,7 +387,7 @@ shared_examples 'GRPC metadata delivery works OK' do client_ops = { CallOps::SEND_INITIAL_METADATA => nil } - call.run_batch(@client_queue, @client_tag, deadline, client_ops) + call.run_batch(client_ops) # server gets the invocation but sends no metadata back rcv_thread.join @@ -417,14 +396,13 @@ shared_examples 'GRPC metadata delivery works OK' do server_ops = { CallOps::SEND_INITIAL_METADATA => md } - server_call.run_batch(@server_queue, @server_tag, deadline, server_ops) + server_call.run_batch(server_ops) # client receives nothing as expected client_ops = { CallOps::RECV_INITIAL_METADATA => nil } - batch_result = call.run_batch(@client_queue, @client_tag, deadline, - client_ops) + batch_result = call.run_batch(client_ops) replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }] expect(batch_result.metadata).to eq(replace_symbols) end @@ -435,9 +413,7 @@ end describe 'the http client/server' do before(:example) do server_host = '0.0.0.0:0' - @client_queue = GRPC::Core::CompletionQueue.new - @server_queue = GRPC::Core::CompletionQueue.new - @server = GRPC::Core::Server.new(@server_queue, nil) + @server = GRPC::Core::Server.new(nil) server_port = @server.add_http2_port(server_host, :this_port_is_insecure) @server.start @ch = Channel.new("0.0.0.0:#{server_port}", nil, :this_channel_is_insecure) @@ -445,7 +421,7 @@ describe 'the http client/server' do after(:example) do @ch.close - @server.close(@server_queue, deadline) + @server.close(deadline) end it_behaves_like 'basic GRPC message delivery is OK' do @@ -467,11 +443,9 @@ describe 'the secure http client/server' do before(:example) do certs = load_test_certs server_host = '0.0.0.0:0' - @client_queue = GRPC::Core::CompletionQueue.new - @server_queue = GRPC::Core::CompletionQueue.new server_creds = GRPC::Core::ServerCredentials.new( nil, [{ private_key: certs[1], cert_chain: certs[2] }], false) - @server = GRPC::Core::Server.new(@server_queue, nil) + @server = GRPC::Core::Server.new(nil) server_port = @server.add_http2_port(server_host, server_creds) @server.start args = { Channel::SSL_TARGET => 'foo.test.google.fr' } @@ -480,7 +454,7 @@ describe 'the secure http client/server' do end after(:example) do - @server.close(@server_queue, deadline) + @server.close(deadline) end it_behaves_like 'basic GRPC message delivery is OK' do @@ -496,7 +470,7 @@ describe 'the secure http client/server' do expected_md = { 'k1' => 'updated-v1', 'k2' => 'v2' } recvd_rpc = nil rcv_thread = Thread.new do - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + recvd_rpc = 
@server.request_call end call = new_client_call @@ -504,8 +478,7 @@ describe 'the secure http client/server' do client_ops = { CallOps::SEND_INITIAL_METADATA => md } - batch_result = call.run_batch(@client_queue, @client_tag, deadline, - client_ops) + batch_result = call.run_batch(client_ops) expect(batch_result.send_metadata).to be true # confirm the server can receive the client metadata diff --git a/src/ruby/spec/generic/active_call_spec.rb b/src/ruby/spec/generic/active_call_spec.rb index d9c9780c93..018580e0df 100644 --- a/src/ruby/spec/generic/active_call_spec.rb +++ b/src/ruby/spec/generic/active_call_spec.rb @@ -39,13 +39,8 @@ describe GRPC::ActiveCall do before(:each) do @pass_through = proc { |x| x } - @server_tag = Object.new - @tag = Object.new - - @client_queue = GRPC::Core::CompletionQueue.new - @server_queue = GRPC::Core::CompletionQueue.new host = '0.0.0.0:0' - @server = GRPC::Core::Server.new(@server_queue, nil) + @server = GRPC::Core::Server.new(nil) server_port = @server.add_http2_port(host, :this_port_is_insecure) @server.start @ch = GRPC::Core::Channel.new("0.0.0.0:#{server_port}", nil, @@ -53,21 +48,20 @@ describe GRPC::ActiveCall do end after(:each) do - @server.close(@server_queue, deadline) + @server.close(deadline) end describe 'restricted view methods' do before(:each) do call = make_test_call - md_tag = ActiveCall.client_invoke(call, @client_queue) - @client_call = ActiveCall.new(call, @client_queue, @pass_through, - @pass_through, deadline, - metadata_tag: md_tag) + ActiveCall.client_invoke(call) + @client_call = ActiveCall.new(call, @pass_through, + @pass_through, deadline) end describe '#multi_req_view' do it 'exposes a fixed subset of the ActiveCall methods' do - want = %w(cancelled, deadline, each_remote_read, metadata, shutdown) + want = %w(cancelled?, deadline, each_remote_read, metadata, shutdown) v = @client_call.multi_req_view want.each do |w| expect(v.methods.include?(w)) @@ -77,7 +71,7 @@ describe GRPC::ActiveCall do describe '#single_req_view' do it 'exposes a fixed subset of the ActiveCall methods' do - want = %w(cancelled, deadline, metadata, shutdown) + want = %w(cancelled?, deadline, metadata, shutdown) v = @client_call.single_req_view want.each do |w| expect(v.methods.include?(w)) @@ -89,46 +83,42 @@ describe GRPC::ActiveCall do describe '#remote_send' do it 'allows a client to send a payload to the server' do call = make_test_call - md_tag = ActiveCall.client_invoke(call, @client_queue) - @client_call = ActiveCall.new(call, @client_queue, @pass_through, - @pass_through, deadline, - metadata_tag: md_tag) + ActiveCall.client_invoke(call) + @client_call = ActiveCall.new(call, @pass_through, + @pass_through, deadline) msg = 'message is a string' @client_call.remote_send(msg) # check that server rpc new was received - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + recvd_rpc = @server.request_call expect(recvd_rpc).to_not eq nil recvd_call = recvd_rpc.call # Accept the call, and verify that the server reads the response ok. 
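Pulling the server-side pieces of this spec together, a hedged sketch of how a test can accept and read a call after the completion-queue removal (the noop procs and the insecure port are assumptions; the call-level API is exactly what the surrounding hunks use):

    noop = proc { |x| x }
    server = GRPC::Core::Server.new(nil)
    server.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
    server.start

    recvd_rpc = server.request_call                 # no queue, tag or deadline
    server_call = GRPC::ActiveCall.new(recvd_rpc.call, noop, noop,
                                       recvd_rpc.deadline,
                                       metadata_received: true)
    msg = server_call.remote_read                   # the client's payload
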
- server_ops = { - CallOps::SEND_INITIAL_METADATA => {} - } - recvd_call.run_batch(@server_queue, @server_tag, deadline, server_ops) - server_call = ActiveCall.new(recvd_call, @server_queue, @pass_through, - @pass_through, deadline) + server_call = ActiveCall.new(recvd_call, @pass_through, + @pass_through, deadline, + metadata_received: true) expect(server_call.remote_read).to eq(msg) end it 'marshals the payload using the marshal func' do call = make_test_call - ActiveCall.client_invoke(call, @client_queue) + ActiveCall.client_invoke(call) marshal = proc { |x| 'marshalled:' + x } - client_call = ActiveCall.new(call, @client_queue, marshal, - @pass_through, deadline) + client_call = ActiveCall.new(call, marshal, @pass_through, deadline) msg = 'message is a string' client_call.remote_send(msg) # confirm that the message was marshalled - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + recvd_rpc = @server.request_call recvd_call = recvd_rpc.call server_ops = { CallOps::SEND_INITIAL_METADATA => nil } - recvd_call.run_batch(@server_queue, @server_tag, deadline, server_ops) - server_call = ActiveCall.new(recvd_call, @server_queue, @pass_through, - @pass_through, deadline) + recvd_call.run_batch(server_ops) + server_call = ActiveCall.new(recvd_call, @pass_through, + @pass_through, deadline, + metadata_received: true) expect(server_call.remote_read).to eq('marshalled:' + msg) end @@ -136,23 +126,24 @@ describe GRPC::ActiveCall do TEST_WRITE_FLAGS.each do |f| it "successfully makes calls with write_flag set to #{f}" do call = make_test_call - ActiveCall.client_invoke(call, @client_queue) + ActiveCall.client_invoke(call) marshal = proc { |x| 'marshalled:' + x } - client_call = ActiveCall.new(call, @client_queue, marshal, + client_call = ActiveCall.new(call, marshal, @pass_through, deadline) msg = 'message is a string' client_call.write_flag = f client_call.remote_send(msg) # confirm that the message was marshalled - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + recvd_rpc = @server.request_call recvd_call = recvd_rpc.call server_ops = { CallOps::SEND_INITIAL_METADATA => nil } - recvd_call.run_batch(@server_queue, @server_tag, deadline, server_ops) - server_call = ActiveCall.new(recvd_call, @server_queue, @pass_through, - @pass_through, deadline) + recvd_call.run_batch(server_ops) + server_call = ActiveCall.new(recvd_call, @pass_through, + @pass_through, deadline, + metadata_received: true) expect(server_call.remote_read).to eq('marshalled:' + msg) end end @@ -162,8 +153,8 @@ describe GRPC::ActiveCall do it 'sends metadata to the server when present' do call = make_test_call metadata = { k1: 'v1', k2: 'v2' } - ActiveCall.client_invoke(call, @client_queue, metadata) - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + ActiveCall.client_invoke(call, metadata) + recvd_rpc = @server.request_call recvd_call = recvd_rpc.call expect(recvd_call).to_not be_nil expect(recvd_rpc.metadata).to_not be_nil @@ -175,10 +166,9 @@ describe GRPC::ActiveCall do describe '#remote_read' do it 'reads the response sent by a server' do call = make_test_call - md_tag = ActiveCall.client_invoke(call, @client_queue) - client_call = ActiveCall.new(call, @client_queue, @pass_through, - @pass_through, deadline, - metadata_tag: md_tag) + ActiveCall.client_invoke(call) + client_call = ActiveCall.new(call, @pass_through, + @pass_through, deadline) msg = 'message is a string' client_call.remote_send(msg) server_call = expect_server_to_receive(msg) @@ -188,10 
+178,9 @@ describe GRPC::ActiveCall do it 'saves no metadata when the server adds no metadata' do call = make_test_call - md_tag = ActiveCall.client_invoke(call, @client_queue) - client_call = ActiveCall.new(call, @client_queue, @pass_through, - @pass_through, deadline, - metadata_tag: md_tag) + ActiveCall.client_invoke(call) + client_call = ActiveCall.new(call, @pass_through, + @pass_through, deadline) msg = 'message is a string' client_call.remote_send(msg) server_call = expect_server_to_receive(msg) @@ -203,10 +192,9 @@ describe GRPC::ActiveCall do it 'saves metadata add by the server' do call = make_test_call - md_tag = ActiveCall.client_invoke(call, @client_queue) - client_call = ActiveCall.new(call, @client_queue, @pass_through, - @pass_through, deadline, - metadata_tag: md_tag) + ActiveCall.client_invoke(call) + client_call = ActiveCall.new(call, @pass_through, + @pass_through, deadline) msg = 'message is a string' client_call.remote_send(msg) server_call = expect_server_to_receive(msg, k1: 'v1', k2: 'v2') @@ -219,10 +207,9 @@ describe GRPC::ActiveCall do it 'get a nil msg before a status when an OK status is sent' do call = make_test_call - md_tag = ActiveCall.client_invoke(call, @client_queue) - client_call = ActiveCall.new(call, @client_queue, @pass_through, - @pass_through, deadline, - metadata_tag: md_tag) + ActiveCall.client_invoke(call) + client_call = ActiveCall.new(call, @pass_through, + @pass_through, deadline) msg = 'message is a string' client_call.remote_send(msg) client_call.writes_done(false) @@ -236,11 +223,10 @@ describe GRPC::ActiveCall do it 'unmarshals the response using the unmarshal func' do call = make_test_call - md_tag = ActiveCall.client_invoke(call, @client_queue) + ActiveCall.client_invoke(call) unmarshal = proc { |x| 'unmarshalled:' + x } - client_call = ActiveCall.new(call, @client_queue, @pass_through, - unmarshal, deadline, - metadata_tag: md_tag) + client_call = ActiveCall.new(call, @pass_through, + unmarshal, deadline) # confirm the client receives the unmarshalled message msg = 'message is a string' @@ -254,17 +240,16 @@ describe GRPC::ActiveCall do describe '#each_remote_read' do it 'creates an Enumerator' do call = make_test_call - client_call = ActiveCall.new(call, @client_queue, @pass_through, + client_call = ActiveCall.new(call, @pass_through, @pass_through, deadline) expect(client_call.each_remote_read).to be_a(Enumerator) end it 'the returns an enumerator that can read n responses' do call = make_test_call - md_tag = ActiveCall.client_invoke(call, @client_queue) - client_call = ActiveCall.new(call, @client_queue, @pass_through, - @pass_through, deadline, - metadata_tag: md_tag) + ActiveCall.client_invoke(call) + client_call = ActiveCall.new(call, @pass_through, + @pass_through, deadline) msg = 'message is a string' reply = 'server_response' client_call.remote_send(msg) @@ -279,10 +264,9 @@ describe GRPC::ActiveCall do it 'the returns an enumerator that stops after an OK Status' do call = make_test_call - md_tag = ActiveCall.client_invoke(call, @client_queue) - client_call = ActiveCall.new(call, @client_queue, @pass_through, - @pass_through, deadline, - metadata_tag: md_tag) + ActiveCall.client_invoke(call) + client_call = ActiveCall.new(call, @pass_through, + @pass_through, deadline) msg = 'message is a string' reply = 'server_response' client_call.remote_send(msg) @@ -302,10 +286,9 @@ describe GRPC::ActiveCall do describe '#writes_done' do it 'finishes ok if the server sends a status response' do call = make_test_call - md_tag = 
ActiveCall.client_invoke(call, @client_queue) - client_call = ActiveCall.new(call, @client_queue, @pass_through, - @pass_through, deadline, - metadata_tag: md_tag) + ActiveCall.client_invoke(call) + client_call = ActiveCall.new(call, @pass_through, + @pass_through, deadline) msg = 'message is a string' client_call.remote_send(msg) expect { client_call.writes_done(false) }.to_not raise_error @@ -318,10 +301,9 @@ describe GRPC::ActiveCall do it 'finishes ok if the server sends an early status response' do call = make_test_call - md_tag = ActiveCall.client_invoke(call, @client_queue) - client_call = ActiveCall.new(call, @client_queue, @pass_through, - @pass_through, deadline, - metadata_tag: md_tag) + ActiveCall.client_invoke(call) + client_call = ActiveCall.new(call, @pass_through, + @pass_through, deadline) msg = 'message is a string' client_call.remote_send(msg) server_call = expect_server_to_receive(msg) @@ -334,10 +316,9 @@ describe GRPC::ActiveCall do it 'finishes ok if writes_done is true' do call = make_test_call - md_tag = ActiveCall.client_invoke(call, @client_queue) - client_call = ActiveCall.new(call, @client_queue, @pass_through, - @pass_through, deadline, - metadata_tag: md_tag) + ActiveCall.client_invoke(call) + client_call = ActiveCall.new(call, @pass_through, + @pass_through, deadline) msg = 'message is a string' client_call.remote_send(msg) server_call = expect_server_to_receive(msg) @@ -355,17 +336,16 @@ describe GRPC::ActiveCall do end def expect_server_to_be_invoked(**kw) - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + recvd_rpc = @server.request_call expect(recvd_rpc).to_not eq nil recvd_call = recvd_rpc.call - recvd_call.run_batch(@server_queue, @server_tag, deadline, - CallOps::SEND_INITIAL_METADATA => kw) - ActiveCall.new(recvd_call, @server_queue, @pass_through, - @pass_through, deadline) + recvd_call.run_batch(CallOps::SEND_INITIAL_METADATA => kw) + ActiveCall.new(recvd_call, @pass_through, @pass_through, deadline, + metadata_received: true, started: true) end def make_test_call - @ch.create_call(@client_queue, nil, nil, '/method', nil, deadline) + @ch.create_call(nil, nil, '/method', nil, deadline) end def deadline diff --git a/src/ruby/spec/generic/client_stub_spec.rb b/src/ruby/spec/generic/client_stub_spec.rb index 168e7fb791..6034b5419c 100644 --- a/src/ruby/spec/generic/client_stub_spec.rb +++ b/src/ruby/spec/generic/client_stub_spec.rb @@ -29,11 +29,14 @@ require 'grpc' +Thread.abort_on_exception = true + def wakey_thread(&blk) n = GRPC::Notifier.new t = Thread.new do blk.call(n) end + t.abort_on_exception = true n.wait t end @@ -54,15 +57,13 @@ describe 'ClientStub' do before(:each) do Thread.abort_on_exception = true @server = nil - @server_queue = nil @method = 'an_rpc_method' @pass = OK @fail = INTERNAL - @cq = GRPC::Core::CompletionQueue.new end after(:each) do - @server.close(@server_queue) unless @server_queue.nil? + @server.close(from_relative_time(2)) unless @server.nil? 
end describe '#new' do @@ -70,7 +71,7 @@ describe 'ClientStub' do it 'can be created from a host and args' do opts = { channel_args: { a_channel_arg: 'an_arg' } } blk = proc do - GRPC::ClientStub.new(fake_host, @cq, :this_channel_is_insecure, **opts) + GRPC::ClientStub.new(fake_host, :this_channel_is_insecure, **opts) end expect(&blk).not_to raise_error end @@ -81,7 +82,7 @@ describe 'ClientStub' do channel_override: @ch } blk = proc do - GRPC::ClientStub.new(fake_host, @cq, :this_channel_is_insecure, **opts) + GRPC::ClientStub.new(fake_host, :this_channel_is_insecure, **opts) end expect(&blk).not_to raise_error end @@ -92,7 +93,7 @@ describe 'ClientStub' do channel_args: { a_channel_arg: 'an_arg' }, channel_override: Object.new } - GRPC::ClientStub.new(fake_host, @cq, :this_channel_is_insecure, **opts) + GRPC::ClientStub.new(fake_host, :this_channel_is_insecure, **opts) end expect(&blk).to raise_error end @@ -100,7 +101,7 @@ describe 'ClientStub' do it 'cannot be created with bad credentials' do blk = proc do opts = { channel_args: { a_channel_arg: 'an_arg' } } - GRPC::ClientStub.new(fake_host, @cq, Object.new, **opts) + GRPC::ClientStub.new(fake_host, Object.new, **opts) end expect(&blk).to raise_error end @@ -115,7 +116,7 @@ describe 'ClientStub' do } } creds = GRPC::Core::ChannelCredentials.new(certs[0], nil, nil) - GRPC::ClientStub.new(fake_host, @cq, creds, **opts) + GRPC::ClientStub.new(fake_host, creds, **opts) end expect(&blk).to_not raise_error end @@ -130,7 +131,7 @@ describe 'ClientStub' do it 'should send a request to/receive a reply from a server' do server_port = create_test_server th = run_request_response(@sent_msg, @resp, @pass) - stub = GRPC::ClientStub.new("localhost:#{server_port}", @cq, + stub = GRPC::ClientStub.new("localhost:#{server_port}", :this_channel_is_insecure) expect(get_response(stub)).to eq(@resp) th.join @@ -141,7 +142,7 @@ describe 'ClientStub' do host = "localhost:#{server_port}" th = run_request_response(@sent_msg, @resp, @pass, k1: 'v1', k2: 'v2') - stub = GRPC::ClientStub.new(host, @cq, :this_channel_is_insecure) + stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) expect(get_response(stub)).to eq(@resp) th.join end @@ -151,7 +152,7 @@ describe 'ClientStub' do alt_host = "localhost:#{server_port}" th = run_request_response(@sent_msg, @resp, @pass) ch = GRPC::Core::Channel.new(alt_host, nil, :this_channel_is_insecure) - stub = GRPC::ClientStub.new('ignored-host', @cq, + stub = GRPC::ClientStub.new('ignored-host', :this_channel_is_insecure, channel_override: ch) expect(get_response(stub)).to eq(@resp) @@ -162,7 +163,7 @@ describe 'ClientStub' do server_port = create_test_server host = "localhost:#{server_port}" th = run_request_response(@sent_msg, @resp, @fail) - stub = GRPC::ClientStub.new(host, @cq, :this_channel_is_insecure) + stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) blk = proc { get_response(stub) } expect(&blk).to raise_error(GRPC::BadStatus) th.join @@ -182,7 +183,8 @@ describe 'ClientStub' do def get_response(stub) op = stub.request_response(@method, @sent_msg, noop, noop, return_op: true, - metadata: { k1: 'v1', k2: 'v2' }) + metadata: { k1: 'v1', k2: 'v2' }, + deadline: from_relative_time(2)) expect(op).to be_a(GRPC::ActiveCall::Operation) op.execute end @@ -196,7 +198,7 @@ describe 'ClientStub' do before(:each) do server_port = create_test_server host = "localhost:#{server_port}" - @stub = GRPC::ClientStub.new(host, @cq, :this_channel_is_insecure) + @stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) 
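For completeness, stub construction with channel credentials follows the same two-argument signature exercised by the '#new' examples above; in this sketch the certificate path, host name and SSL target override are placeholders, not values from the patch.

    cert_text = File.read('spec/testdata/ca.pem')   # placeholder path
    creds = GRPC::Core::ChannelCredentials.new(cert_text, nil, nil)
    stub = GRPC::ClientStub.new('myservice.example.com:443', creds,
                                channel_args: {
                                  GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.fr'
                                })
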
@metadata = { k1: 'v1', k2: 'v2' } @sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s } @resp = 'a_reply' @@ -262,7 +264,7 @@ describe 'ClientStub' do server_port = create_test_server host = "localhost:#{server_port}" th = run_server_streamer(@sent_msg, @replys, @pass) - stub = GRPC::ClientStub.new(host, @cq, :this_channel_is_insecure) + stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) expect(get_responses(stub).collect { |r| r }).to eq(@replys) th.join end @@ -271,7 +273,7 @@ describe 'ClientStub' do server_port = create_test_server host = "localhost:#{server_port}" th = run_server_streamer(@sent_msg, @replys, @fail) - stub = GRPC::ClientStub.new(host, @cq, :this_channel_is_insecure) + stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) e = get_responses(stub) expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus) th.join @@ -282,7 +284,7 @@ describe 'ClientStub' do host = "localhost:#{server_port}" th = run_server_streamer(@sent_msg, @replys, @fail, k1: 'v1', k2: 'v2') - stub = GRPC::ClientStub.new(host, @cq, :this_channel_is_insecure) + stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) e = get_responses(stub) expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus) th.join @@ -327,7 +329,7 @@ describe 'ClientStub' do it 'supports sending all the requests first', bidi: true do th = run_bidi_streamer_handle_inputs_first(@sent_msgs, @replys, @pass) - stub = GRPC::ClientStub.new(@host, @cq, :this_channel_is_insecure) + stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure) e = get_responses(stub) expect(e.collect { |r| r }).to eq(@replys) th.join @@ -335,7 +337,7 @@ describe 'ClientStub' do it 'supports client-initiated ping pong', bidi: true do th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, true) - stub = GRPC::ClientStub.new(@host, @cq, :this_channel_is_insecure) + stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure) e = get_responses(stub) expect(e.collect { |r| r }).to eq(@sent_msgs) th.join @@ -343,7 +345,7 @@ describe 'ClientStub' do it 'supports a server-initiated ping pong', bidi: true do th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, false) - stub = GRPC::ClientStub.new(@host, @cq, :this_channel_is_insecure) + stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure) e = get_responses(stub) expect(e.collect { |r| r }).to eq(@sent_msgs) th.join @@ -372,26 +374,6 @@ describe 'ClientStub' do it_behaves_like 'bidi streaming' end - - describe 'without enough time to run' do - before(:each) do - @sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s } - @replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s } - server_port = create_test_server - @host = "localhost:#{server_port}" - end - - it 'should fail with DeadlineExceeded', bidi: true do - @server.start - stub = GRPC::ClientStub.new(@host, @cq, :this_channel_is_insecure) - blk = proc do - e = stub.bidi_streamer(@method, @sent_msgs, noop, noop, - deadline: from_relative_time(0.001)) - e.collect { |r| r } - end - expect(&blk).to raise_error GRPC::BadStatus, /Deadline Exceeded/ - end - end end def run_server_streamer(expected_input, replys, status, **kw) @@ -460,21 +442,18 @@ describe 'ClientStub' do end def create_test_server - @server_queue = GRPC::Core::CompletionQueue.new - @server = GRPC::Core::Server.new(@server_queue, nil) + @server = GRPC::Core::Server.new(nil) @server.add_http2_port('0.0.0.0:0', :this_port_is_insecure) end def expect_server_to_be_invoked(notifier) @server.start notifier.notify(nil) - server_tag = Object.new - recvd_rpc = 
@server.request_call(@server_queue, server_tag, - INFINITE_FUTURE) + recvd_rpc = @server.request_call recvd_call = recvd_rpc.call recvd_call.metadata = recvd_rpc.metadata - recvd_call.run_batch(@server_queue, server_tag, Time.now + 2, - SEND_INITIAL_METADATA => nil) - GRPC::ActiveCall.new(recvd_call, @server_queue, noop, noop, INFINITE_FUTURE) + recvd_call.run_batch(SEND_INITIAL_METADATA => nil) + GRPC::ActiveCall.new(recvd_call, noop, noop, INFINITE_FUTURE, + metadata_received: true) end end diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb index 943502cea2..31157cf161 100644 --- a/src/ruby/spec/generic/rpc_server_spec.rb +++ b/src/ruby/spec/generic/rpc_server_spec.rb @@ -95,7 +95,7 @@ class FailingService def initialize(_default_var = 'ignored') @details = 'app error' @code = 101 - @md = { failed_method: 'an_rpc' } + @md = { 'failed_method' => 'an_rpc' } end def an_rpc(_req, _call) @@ -135,8 +135,6 @@ describe GRPC::RpcServer do @pass = 0 @fail = 1 @noop = proc { |x| x } - - @server_queue = GRPC::Core::CompletionQueue.new end describe '#new' do @@ -148,28 +146,6 @@ describe GRPC::RpcServer do expect(&blk).not_to raise_error end - it 'can be created with a completion queue override' do - opts = { - server_args: { a_channel_arg: 'an_arg' }, - completion_queue_override: @server_queue - } - blk = proc do - RpcServer.new(**opts) - end - expect(&blk).not_to raise_error - end - - it 'cannot be created with a bad completion queue override' do - blk = proc do - opts = { - server_args: { a_channel_arg: 'an_arg' }, - completion_queue_override: Object.new - } - RpcServer.new(**opts) - end - expect(&blk).to raise_error - end - it 'cannot be created with invalid ServerCredentials' do blk = proc do opts = { @@ -294,7 +270,6 @@ describe GRPC::RpcServer do context 'with no connect_metadata' do before(:each) do server_opts = { - completion_queue_override: @server_queue, poll_period: 1 } @srv = RpcServer.new(**server_opts) @@ -309,8 +284,7 @@ describe GRPC::RpcServer do @srv.wait_till_running req = EchoMsg.new blk = proc do - cq = GRPC::Core::CompletionQueue.new - stub = GRPC::ClientStub.new(@host, cq, :this_channel_is_insecure, + stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure, **client_opts) stub.request_response('/unknown', req, marshal, unmarshal) end @@ -325,8 +299,7 @@ describe GRPC::RpcServer do @srv.wait_till_running req = EchoMsg.new blk = proc do - cq = GRPC::Core::CompletionQueue.new - stub = GRPC::ClientStub.new(@host, cq, :this_channel_is_insecure, + stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure, **client_opts) stub.request_response('/an_rpc', req, marshal, unmarshal) end @@ -422,7 +395,6 @@ describe GRPC::RpcServer do it 'should return RESOURCE_EXHAUSTED on too many jobs', server: true do opts = { server_args: { a_channel_arg: 'an_arg' }, - completion_queue_override: @server_queue, pool_size: 1, poll_period: 1, max_waiting_requests: 0 @@ -466,7 +438,6 @@ describe GRPC::RpcServer do end before(:each) do server_opts = { - completion_queue_override: @server_queue, poll_period: 1, connect_md_proc: test_md_proc } @@ -502,7 +473,6 @@ describe GRPC::RpcServer do context 'with trailing metadata' do before(:each) do server_opts = { - completion_queue_override: @server_queue, poll_period: 1 } @srv = RpcServer.new(**server_opts) @@ -545,7 +515,7 @@ describe GRPC::RpcServer do op = stub.an_rpc(req, return_op: true, metadata: { k1: 'v1', k2: 'v2' }) expect(op.metadata).to be nil expect(op.execute).to be_a(EchoMsg) - 
expect(op.metadata).to eq(wanted_trailers) + expect(op.trailing_metadata).to eq(wanted_trailers) @srv.stop t.join end diff --git a/src/ruby/spec/pb/health/checker_spec.rb b/src/ruby/spec/pb/health/checker_spec.rb index f3d121a31e..de11c9fedf 100644 --- a/src/ruby/spec/pb/health/checker_spec.rb +++ b/src/ruby/spec/pb/health/checker_spec.rb @@ -168,11 +168,9 @@ describe Grpc::Health::Checker do CheckerStub = Grpc::Health::Checker.rpc_stub_class before(:each) do - @server_queue = GRPC::Core::CompletionQueue.new server_host = '0.0.0.0:0' @client_opts = { channel_override: @ch } server_opts = { - completion_queue_override: @server_queue, poll_period: 1 } @srv = RpcServer.new(**server_opts) diff --git a/src/ruby/spec/server_spec.rb b/src/ruby/spec/server_spec.rb index 439b19fb8d..003d8f69d5 100644 --- a/src/ruby/spec/server_spec.rb +++ b/src/ruby/spec/server_spec.rb @@ -43,19 +43,15 @@ describe Server do GRPC::Core::ServerCredentials.new(*load_test_certs) end - before(:each) do - @cq = GRPC::Core::CompletionQueue.new - end - describe '#start' do it 'runs without failing' do - blk = proc { Server.new(@cq, nil).start } + blk = proc { Server.new(nil).start } expect(&blk).to_not raise_error end it 'fails if the server is closed' do - s = Server.new(@cq, nil) - s.close(@cq) + s = Server.new(nil) + s.close expect { s.start }.to raise_error(RuntimeError) end end @@ -63,19 +59,19 @@ describe Server do describe '#destroy' do it 'destroys a server ok' do s = start_a_server - blk = proc { s.destroy(@cq) } + blk = proc { s.destroy } expect(&blk).to_not raise_error end it 'can be called more than once without error' do s = start_a_server begin - blk = proc { s.destroy(@cq) } + blk = proc { s.destroy } expect(&blk).to_not raise_error blk.call expect(&blk).to_not raise_error ensure - s.close(@cq) + s.close end end end @@ -84,7 +80,7 @@ describe Server do it 'closes a server ok' do s = start_a_server begin - blk = proc { s.close(@cq) } + blk = proc { s.close } expect(&blk).to_not raise_error ensure s.close(@cq) @@ -93,7 +89,7 @@ describe Server do it 'can be called more than once without error' do s = start_a_server - blk = proc { s.close(@cq) } + blk = proc { s.close } expect(&blk).to_not raise_error blk.call expect(&blk).to_not raise_error @@ -104,16 +100,16 @@ describe Server do describe 'for insecure servers' do it 'runs without failing' do blk = proc do - s = Server.new(@cq, nil) + s = Server.new(nil) s.add_http2_port('localhost:0', :this_port_is_insecure) - s.close(@cq) + s.close end expect(&blk).to_not raise_error end it 'fails if the server is closed' do - s = Server.new(@cq, nil) - s.close(@cq) + s = Server.new(nil) + s.close blk = proc do s.add_http2_port('localhost:0', :this_port_is_insecure) end @@ -125,16 +121,16 @@ describe Server do let(:cert) { create_test_cert } it 'runs without failing' do blk = proc do - s = Server.new(@cq, nil) + s = Server.new(nil) s.add_http2_port('localhost:0', cert) - s.close(@cq) + s.close end expect(&blk).to_not raise_error end it 'fails if the server is closed' do - s = Server.new(@cq, nil) - s.close(@cq) + s = Server.new(nil) + s.close blk = proc { s.add_http2_port('localhost:0', cert) } expect(&blk).to raise_error(RuntimeError) end @@ -142,8 +138,8 @@ describe Server do end shared_examples '#new' do - it 'takes a completion queue with nil channel args' do - expect { Server.new(@cq, nil) }.to_not raise_error + it 'takes nil channel args' do + expect { Server.new(nil) }.to_not raise_error end it 'does not take a hash with bad keys as channel args' do @@ -194,14 
+190,14 @@ describe Server do describe '#new with an insecure channel' do def construct_with_args(a) - proc { Server.new(@cq, a) } + proc { Server.new(a) } end it_behaves_like '#new' end def start_a_server - s = Server.new(@cq, nil) + s = Server.new(nil) s.add_http2_port('0.0.0.0:0', :this_port_is_insecure) s.start s diff --git a/src/ruby/tools/version.rb b/src/ruby/tools/version.rb index 68c1bf369d..e457ec09dd 100644 --- a/src/ruby/tools/version.rb +++ b/src/ruby/tools/version.rb @@ -29,6 +29,6 @@ module GRPC module Tools - VERSION = '0.16.0.dev' + VERSION = '1.1.0.dev' end end diff --git a/templates/CMakeLists.txt.template b/templates/CMakeLists.txt.template index 76299cb21b..52e8b866be 100644 --- a/templates/CMakeLists.txt.template +++ b/templates/CMakeLists.txt.template @@ -74,6 +74,13 @@ set(ZLIB_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/third_party/zlib) endif() + # Building the protobuf tests require gmock what is not part of a standard protobuf checkout. + # Disable them unless they are explicitly requested from the cmake command line (when we assume + # gmock is downloaded to the right location inside protobuf). + if(NOT protobuf_BUILD_TESTS) + set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests") + endif() + add_subdirectory(<%text>${BORINGSSL_ROOT_DIR}</%text> third_party/boringssl) add_subdirectory(<%text>${PROTOBUF_ROOT_DIR}</%text>/cmake third_party/protobuf) add_subdirectory(<%text>${ZLIB_ROOT_DIR}</%text> third_party/zlib) diff --git a/templates/composer.json.template b/templates/composer.json.template index 7d2029c218..c9ffbbcbd0 100644 --- a/templates/composer.json.template +++ b/templates/composer.json.template @@ -15,8 +15,10 @@ ], "require": { "php": ">=5.5.0", - "datto/protobuf-php": "dev-master", - "google/auth": "v0.7" + "datto/protobuf-php": "dev-master" + }, + "require-dev": { + "google/auth": "v0.9" }, "autoload": { "psr-4": { diff --git a/templates/gRPC-Core.podspec.template b/templates/gRPC-Core.podspec.template new file mode 100644 index 0000000000..aefe6e965c --- /dev/null +++ b/templates/gRPC-Core.podspec.template @@ -0,0 +1,157 @@ +%YAML 1.2 +--- | + # GRPC CocoaPods podspec + # This file has been automatically generated from a template file. Please make modifications to + # `templates/gRPC-Core.podspec.template` instead. This file can be regenerated from the template by + # running `tools/buildgen/generate_projects.sh`. + + # Copyright 2015, Google Inc. + # All rights reserved. + # + # Redistribution and use in source and binary forms, with or without + # modification, are permitted provided that the following conditions are + # met: + # + # * Redistributions of source code must retain the above copyright + # notice, this list of conditions and the following disclaimer. + # * Redistributions in binary form must reproduce the above + # copyright notice, this list of conditions and the following disclaimer + # in the documentation and/or other materials provided with the + # distribution. + # * Neither the name of Google Inc. nor the names of its + # contributors may be used to endorse or promote products derived from + # this software without specific prior written permission. + # + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + <%! + def grpc_private_files(libs): + out = [] + for lib in libs: + if lib.name in ("grpc", "gpr"): + out += lib.get('headers', []) + out += lib.get('src', []) + return out; + + def grpc_public_headers(libs): + out = [] + for lib in libs: + if lib.name in ("grpc", "gpr"): + out += lib.get('public_headers', []) + return out + + def grpc_private_headers(libs): + out = [] + for lib in libs: + if lib.name in ("grpc", "gpr"): + out += lib.get('headers', []) + return out + + def ruby_multiline_list(files, indent): + return (',\n' + indent*' ').join('\'%s\'' % f for f in files) + %> + Pod::Spec.new do |s| + s.name = 'gRPC-Core' + version = '0.14.0' + s.version = version + s.summary = 'Core cross-platform gRPC library, written in C' + s.homepage = 'http://www.grpc.io' + s.license = 'New BSD' + s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' } + + s.source = { + :git => 'https://github.com/grpc/grpc.git', + :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}", + # TODO(jcanizales): Depend explicitly on the nanopb pod, and disable submodules. + :submodules => true, + } + + s.ios.deployment_target = '7.1' + s.osx.deployment_target = '10.9' + s.requires_arc = false + + name = 'grpc' + + # When creating a dynamic framework, name it grpc.framework instead of gRPC-Core.framework. + # This lets users write their includes like `#include <grpc/grpc.h>` as opposed to `#include + # <gRPC-Core/grpc.h>`. + s.module_name = name + + # When creating a dynamic framework, copy the headers under `include/grpc/` into the root of + # the `Headers/` directory of the framework (i.e., not under `Headers/include/grpc`). + # + # TODO(jcanizales): Debug why this doesn't work on macOS. + s.header_mappings_dir = 'include/grpc' + + # The above has an undesired effect when creating a static library: It forces users to write + # includes like `#include <gRPC-Core/grpc.h>`. `s.header_dir` adds a path prefix to that, and + # because Cocoapods lets omit the pod name when including headers of static libraries, the + # following lets users write `#include <grpc/grpc.h>`. + s.header_dir = name + + # The module map created automatically by Cocoapods doesn't work for C libraries like gRPC-Core. + s.module_map = 'include/grpc/module.modulemap' + + # To compile the library, we need the user headers search path (quoted includes) to point to the + # root of the repo, and the system headers search path (angled includes) to point to `include/`. + # Cocoapods effectively clones the repo under `<Podfile dir>/Pods/gRPC-Core/`, and sets a build + # variable called `$(PODS_ROOT)` to `<Podfile dir>/Pods/`, so we use that. + # + # Relying on the file structure under $(PODS_ROOT) isn't officially supported in Cocoapods, as it + # is taken as an implementation detail. 
We've asked for an alternative, and have been told that + # what we're doing should keep working: https://github.com/CocoaPods/CocoaPods/issues/4386 + # + # The `src_root` value of `$(PODS_ROOT)/gRPC-Core` assumes Cocoapods is installing this pod from + # its remote repo. For local development of this library, enabled by using `:path` in the Podfile, + # that assumption is wrong. In such case, the following settings need to be reset with the + # appropriate value of `src_root`. This can be accomplished in the `pre_install` hook of the + # Podfile; see `src/objective-c/tests/Podfile` for an example. + src_root = '$(PODS_ROOT)/gRPC-Core' + s.pod_target_xcconfig = { + 'GRPC_SRC_ROOT' => src_root, + 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"', + 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"', + # If we don't set these two settings, `include/grpc/support/time.h` and + # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the + # build. + 'USE_HEADERMAP' => 'NO', + 'ALWAYS_SEARCH_USER_PATHS' => 'NO', + } + + # Like many other C libraries, gRPC-Core has its public headers under `include/<libname>/` and its + # sources and private headers in other directories outside `include/`. Cocoapods' linter doesn't + # allow any header to be listed outside the `header_mappings_dir` (even though doing so works in + # practice). Because we need our `header_mappings_dir` to be `include/grpc/` for the reason + # mentioned above, we work around the linter limitation by dividing the pod into two subspecs, one + # for public headers and the other for implementation. Each gets its own `header_mappings_dir`, + # making the linter happy. + # + # The list of source files is generated by a template: `templates/gRPC-Core.podspec.template`. It + # can be regenerated from the template by running `tools/buildgen/generate_projects.sh`. + s.subspec 'Interface' do |ss| + ss.header_mappings_dir = 'include/grpc' + + ss.source_files = ${ruby_multiline_list(grpc_public_headers(libs), 22)} + end + s.subspec 'Implementation' do |ss| + ss.header_mappings_dir = '.' + ss.libraries = 'z' + ss.dependency "#{s.name}/Interface", version + ss.dependency 'BoringSSL', '~> 4.0' + + # To save you from scrolling, this is the last part of the podspec. 
+ ss.source_files = ${ruby_multiline_list(grpc_private_files(libs), 22)} + + ss.private_header_files = ${ruby_multiline_list(grpc_private_headers(libs), 30)} + end + end diff --git a/templates/package.json.template b/templates/package.json.template index 9d19ca0629..f68f64d047 100644 --- a/templates/package.json.template +++ b/templates/package.json.template @@ -61,7 +61,6 @@ "files": [ "LICENSE", "src/node/README.md", - "src/node/health_check", "src/proto", "etc", "src/node/index.js", diff --git a/templates/src/node/health_check/package.json.template b/templates/src/node/health_check/package.json.template new file mode 100644 index 0000000000..1248ced1e1 --- /dev/null +++ b/templates/src/node/health_check/package.json.template @@ -0,0 +1,31 @@ +%YAML 1.2 +--- | + { + "name": "grpc-health-check", + "version": "${settings.node_version}", + "author": "Google Inc.", + "description": "Health check service for use with gRPC", + "repository": { + "type": "git", + "url": "https://github.com/grpc/grpc.git" + }, + "bugs": "https://github.com/grpc/grpc/issues", + "contributors": [ + { + "name": "Michael Lumish", + "email": "mlumish@google.com" + } + ], + "dependencies": { + "grpc": "^0.15.0", + "lodash": "^3.9.3", + "google-protobuf": "^3.0.0-alpha.5" + }, + "files": { + "LICENSE", + "health.js", + "v1" + }, + "main": "src/node/index.js", + "license": "BSD-3-Clause" + } diff --git a/templates/src/python/grpcio/grpc/_cython/imports.generated.c.template b/templates/src/python/grpcio/grpc/_cython/imports.generated.c.template index 84fa5e62bf..d83bccad1d 100644 --- a/templates/src/python/grpcio/grpc/_cython/imports.generated.c.template +++ b/templates/src/python/grpcio/grpc/_cython/imports.generated.c.template @@ -33,29 +33,9 @@ * */ + /* TODO(atash) remove cruft */ #include <grpc/support/port_platform.h> #include "imports.generated.h" - #ifdef GPR_WINDOWS - - %for api in c_apis: - ${api.name}_type ${api.name}_import; - %endfor - - #ifdef __cplusplus - extern "C" { - #endif /* __cpluslus */ - - void pygrpc_load_imports(HMODULE library) { - %for api in c_apis: - ${api.name}_import = (${api.name}_type) GetProcAddress(library, "${api.name}"); - %endfor - } - - #ifdef __cplusplus - } - #endif /* __cpluslus */ - - #endif /* !GPR_WINDOWS */ diff --git a/templates/src/python/grpcio/grpc/_cython/imports.generated.h.template b/templates/src/python/grpcio/grpc/_cython/imports.generated.h.template index d0f60dc0a5..b85bc3dbd8 100644 --- a/templates/src/python/grpcio/grpc/_cython/imports.generated.h.template +++ b/templates/src/python/grpcio/grpc/_cython/imports.generated.h.template @@ -33,37 +33,12 @@ * */ + /* TODO(atash) remove cruft */ #ifndef PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ #define PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ #include <grpc/support/port_platform.h> - #ifdef GPR_WINDOWS - - #include <windows.h> - - %for header in sorted(set(api.header for api in c_apis)): - #include <${'/'.join(header.split('/')[1:])}> - %endfor - - %for api in c_apis: - typedef ${api.return_type}(*${api.name}_type)(${api.arguments}); - extern ${api.name}_type ${api.name}_import; - #define ${api.name} ${api.name}_import - %endfor - - #ifdef __cplusplus - extern "C" { - #endif /* __cpluslus */ - - void pygrpc_load_imports(HMODULE library); - - #ifdef __cplusplus - } - #endif /* __cpluslus */ - - #else /* !GPR_WINDOWS */ - #include <grpc/byte_buffer.h> #include <grpc/byte_buffer_reader.h> #include <grpc/compression.h> @@ -74,6 +49,4 @@ #include <grpc/support/time.h> #include <grpc/status.h> - #endif /* !GPR_WINDOWS */ - #endif diff --git 
a/test/core/end2end/cq_verifier.c b/test/core/end2end/cq_verifier.c index 8e9fa70b0e..890309c44a 100644 --- a/test/core/end2end/cq_verifier.c +++ b/test/core/end2end/cq_verifier.c @@ -149,7 +149,8 @@ int byte_buffer_eq_string(grpc_byte_buffer *bb, const char *str) { grpc_byte_buffer *rbb; int res; - grpc_byte_buffer_reader_init(&reader, bb); + GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, bb) && + "Couldn't init byte buffer reader"); rbb = grpc_raw_byte_buffer_from_reader(&reader); res = byte_buffer_eq_slice(rbb, gpr_slice_from_copied_string(str)); grpc_byte_buffer_reader_destroy(&reader); diff --git a/test/core/surface/byte_buffer_reader_test.c b/test/core/surface/byte_buffer_reader_test.c index 9c6734e179..1ab1a06211 100644 --- a/test/core/surface/byte_buffer_reader_test.c +++ b/test/core/surface/byte_buffer_reader_test.c @@ -59,7 +59,8 @@ static void test_read_one_slice(void) { slice = gpr_slice_from_copied_string("test"); buffer = grpc_raw_byte_buffer_create(&slice, 1); gpr_slice_unref(slice); - grpc_byte_buffer_reader_init(&reader, buffer); + GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, buffer) && + "Couldn't init byte buffer reader"); first_code = grpc_byte_buffer_reader_next(&reader, &first_slice); GPR_ASSERT(first_code != 0); GPR_ASSERT(memcmp(GPR_SLICE_START_PTR(first_slice), "test", 4) == 0); @@ -81,7 +82,8 @@ static void test_read_one_slice_malloc(void) { memcpy(GPR_SLICE_START_PTR(slice), "test", 4); buffer = grpc_raw_byte_buffer_create(&slice, 1); gpr_slice_unref(slice); - grpc_byte_buffer_reader_init(&reader, buffer); + GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, buffer) && + "Couldn't init byte buffer reader"); first_code = grpc_byte_buffer_reader_next(&reader, &first_slice); GPR_ASSERT(first_code != 0); GPR_ASSERT(memcmp(GPR_SLICE_START_PTR(first_slice), "test", 4) == 0); @@ -102,7 +104,8 @@ static void test_read_none_compressed_slice(void) { slice = gpr_slice_from_copied_string("test"); buffer = grpc_raw_byte_buffer_create(&slice, 1); gpr_slice_unref(slice); - grpc_byte_buffer_reader_init(&reader, buffer); + GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, buffer) && + "Couldn't init byte buffer reader"); first_code = grpc_byte_buffer_reader_next(&reader, &first_slice); GPR_ASSERT(first_code != 0); GPR_ASSERT(memcmp(GPR_SLICE_START_PTR(first_slice), "test", 4) == 0); @@ -112,6 +115,20 @@ static void test_read_none_compressed_slice(void) { grpc_byte_buffer_destroy(buffer); } +static void test_read_corrupted_slice(void) { + gpr_slice slice; + grpc_byte_buffer *buffer; + grpc_byte_buffer_reader reader; + + LOG_TEST("test_read_corrupted_slice"); + slice = gpr_slice_from_copied_string("test"); + buffer = grpc_raw_byte_buffer_create(&slice, 1); + buffer->data.raw.compression = GRPC_COMPRESS_GZIP; /* lies! 
*/ + gpr_slice_unref(slice); + GPR_ASSERT(!grpc_byte_buffer_reader_init(&reader, buffer)); + grpc_byte_buffer_destroy(buffer); +} + static void read_compressed_slice(grpc_compression_algorithm algorithm, size_t input_size) { gpr_slice input_slice; @@ -132,7 +149,8 @@ static void read_compressed_slice(grpc_compression_algorithm algorithm, buffer = grpc_raw_compressed_byte_buffer_create(sliceb_out.slices, sliceb_out.count, algorithm); - grpc_byte_buffer_reader_init(&reader, buffer); + GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, buffer) && + "Couldn't init byte buffer reader"); while (grpc_byte_buffer_reader_next(&reader, &read_slice)) { GPR_ASSERT(memcmp(GPR_SLICE_START_PTR(read_slice), @@ -170,7 +188,8 @@ static void test_byte_buffer_from_reader(void) { memcpy(GPR_SLICE_START_PTR(slice), "test", 4); buffer = grpc_raw_byte_buffer_create(&slice, 1); gpr_slice_unref(slice); - grpc_byte_buffer_reader_init(&reader, buffer); + GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, buffer) && + "Couldn't init byte buffer reader"); buffer_from_reader = grpc_raw_byte_buffer_from_reader(&reader); GPR_ASSERT(buffer->type == buffer_from_reader->type); @@ -206,7 +225,8 @@ static void test_readall(void) { gpr_slice_unref(slices[0]); gpr_slice_unref(slices[1]); - grpc_byte_buffer_reader_init(&reader, buffer); + GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, buffer) && + "Couldn't init byte buffer reader"); slice_out = grpc_byte_buffer_reader_readall(&reader); GPR_ASSERT(GPR_SLICE_LENGTH(slice_out) == 512 + 1024); @@ -241,7 +261,8 @@ static void test_byte_buffer_copy(void) { gpr_slice_unref(slices[1]); copied_buffer = grpc_byte_buffer_copy(buffer); - grpc_byte_buffer_reader_init(&reader, copied_buffer); + GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, copied_buffer) && + "Couldn't init byte buffer reader"); slice_out = grpc_byte_buffer_reader_readall(&reader); GPR_ASSERT(GPR_SLICE_LENGTH(slice_out) == 512 + 1024); @@ -260,6 +281,7 @@ int main(int argc, char **argv) { test_read_none_compressed_slice(); test_read_gzip_compressed_slice(); test_read_deflate_compressed_slice(); + test_read_corrupted_slice(); test_byte_buffer_from_reader(); test_byte_buffer_copy(); test_readall(); diff --git a/test/core/surface/server_test.c b/test/core/surface/server_test.c index 6dd8a435aa..3fd1c2c266 100644 --- a/test/core/surface/server_test.c +++ b/test/core/surface/server_test.c @@ -139,7 +139,7 @@ void test_bind_server_to_addr(const char *host, bool secure) { } static int external_dns_works(const char *host) { - grpc_resolved_addresses *res; + grpc_resolved_addresses *res = NULL; grpc_error *error = grpc_blocking_resolve_address(host, "80", &res); GRPC_ERROR_UNREF(error); if (res != NULL) { diff --git a/test/core/transport/chttp2/status_conversion_test.c b/test/core/transport/chttp2/status_conversion_test.c index e6fc785728..f5a5cd1395 100644 --- a/test/core/transport/chttp2/status_conversion_test.c +++ b/test/core/transport/chttp2/status_conversion_test.c @@ -37,8 +37,8 @@ #define GRPC_STATUS_TO_HTTP2_ERROR(a, b) \ GPR_ASSERT(grpc_chttp2_grpc_status_to_http2_error(a) == (b)) -#define HTTP2_ERROR_TO_GRPC_STATUS(a, b) \ - GPR_ASSERT(grpc_chttp2_http2_error_to_grpc_status(a) == (b)) +#define HTTP2_ERROR_TO_GRPC_STATUS(a, deadline, b) \ + GPR_ASSERT(grpc_chttp2_http2_error_to_grpc_status(a, deadline) == (b)) #define GRPC_STATUS_TO_HTTP2_STATUS(a, b) \ GPR_ASSERT(grpc_chttp2_grpc_status_to_http2_status(a) == (b)) #define HTTP2_STATUS_TO_GRPC_STATUS(a, b) \ GPR_ASSERT(grpc_chttp2_http2_status_to_grpc_status(a) == (b)) @@ -54,8 +54,7 @@ int main(int argc, char **argv) { 
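
The byte_buffer_reader_test.c and cq_verifier.c hunks above treat grpc_byte_buffer_reader_init as fallible: each call is wrapped in GPR_ASSERT(init && "Couldn't init byte buffer reader") so the string literal shows up in the assertion output, and the new test_read_corrupted_slice labels a raw slice as GZIP-compressed and expects init to fail. The sketch below illustrates the calling pattern those tests imply; it is not code from this commit, the helper name is made up, and the only assumption is that init returns non-zero on success.

#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
#include <grpc/support/log.h>
#include <grpc/support/slice.h>

/* Illustrative helper: only walk the buffer if the reader initialized,
 * i.e. the compression flag matches the actual payload. */
static void dump_byte_buffer(grpc_byte_buffer *bb) {
  grpc_byte_buffer_reader reader;
  gpr_slice slice;
  if (!grpc_byte_buffer_reader_init(&reader, bb)) {
    gpr_log(GPR_ERROR, "corrupted or mislabelled byte buffer");
    return;
  }
  while (grpc_byte_buffer_reader_next(&reader, &slice)) {
    gpr_log(GPR_INFO, "slice of %d bytes", (int)GPR_SLICE_LENGTH(slice));
    gpr_slice_unref(slice); /* _next hands out a ref per slice */
  }
  grpc_byte_buffer_reader_destroy(&reader);
}
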
GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_UNKNOWN, GRPC_CHTTP2_INTERNAL_ERROR); GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_INVALID_ARGUMENT, GRPC_CHTTP2_INTERNAL_ERROR); - GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_DEADLINE_EXCEEDED, - GRPC_CHTTP2_INTERNAL_ERROR); + GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_DEADLINE_EXCEEDED, GRPC_CHTTP2_CANCEL); GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_NOT_FOUND, GRPC_CHTTP2_INTERNAL_ERROR); GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_ALREADY_EXISTS, GRPC_CHTTP2_INTERNAL_ERROR); @@ -95,25 +94,60 @@ int main(int argc, char **argv) { GRPC_STATUS_TO_HTTP2_STATUS(GRPC_STATUS_UNAVAILABLE, 200); GRPC_STATUS_TO_HTTP2_STATUS(GRPC_STATUS_DATA_LOSS, 200); - HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_NO_ERROR, GRPC_STATUS_INTERNAL); - HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_PROTOCOL_ERROR, GRPC_STATUS_INTERNAL); - HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INTERNAL_ERROR, GRPC_STATUS_INTERNAL); - HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FLOW_CONTROL_ERROR, + const gpr_timespec before_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_NO_ERROR, before_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_PROTOCOL_ERROR, before_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INTERNAL_ERROR, before_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FLOW_CONTROL_ERROR, before_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_SETTINGS_TIMEOUT, before_deadline, GRPC_STATUS_INTERNAL); - HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_SETTINGS_TIMEOUT, + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_STREAM_CLOSED, before_deadline, GRPC_STATUS_INTERNAL); - HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_STREAM_CLOSED, GRPC_STATUS_INTERNAL); - HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FRAME_SIZE_ERROR, + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FRAME_SIZE_ERROR, before_deadline, GRPC_STATUS_INTERNAL); - HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_REFUSED_STREAM, + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_REFUSED_STREAM, before_deadline, GRPC_STATUS_UNAVAILABLE); - HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CANCEL, GRPC_STATUS_CANCELLED); - HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_COMPRESSION_ERROR, + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CANCEL, before_deadline, + GRPC_STATUS_CANCELLED); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_COMPRESSION_ERROR, before_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CONNECT_ERROR, before_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_ENHANCE_YOUR_CALM, before_deadline, + GRPC_STATUS_RESOURCE_EXHAUSTED); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INADEQUATE_SECURITY, before_deadline, + GRPC_STATUS_PERMISSION_DENIED); + + const gpr_timespec after_deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_NO_ERROR, after_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_PROTOCOL_ERROR, after_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INTERNAL_ERROR, after_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FLOW_CONTROL_ERROR, after_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_SETTINGS_TIMEOUT, after_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_STREAM_CLOSED, after_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FRAME_SIZE_ERROR, after_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_REFUSED_STREAM, 
after_deadline, + GRPC_STATUS_UNAVAILABLE); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CANCEL, after_deadline, + GRPC_STATUS_DEADLINE_EXCEEDED); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_COMPRESSION_ERROR, after_deadline, + GRPC_STATUS_INTERNAL); + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CONNECT_ERROR, after_deadline, GRPC_STATUS_INTERNAL); - HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CONNECT_ERROR, GRPC_STATUS_INTERNAL); - HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_ENHANCE_YOUR_CALM, + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_ENHANCE_YOUR_CALM, after_deadline, GRPC_STATUS_RESOURCE_EXHAUSTED); - HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INADEQUATE_SECURITY, + HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INADEQUATE_SECURITY, after_deadline, GRPC_STATUS_PERMISSION_DENIED); HTTP2_STATUS_TO_GRPC_STATUS(200, GRPC_STATUS_OK); diff --git a/test/cpp/interop/metrics_client.cc b/test/cpp/interop/metrics_client.cc index 7a0cb994df..179de30805 100644 --- a/test/cpp/interop/metrics_client.cc +++ b/test/cpp/interop/metrics_client.cc @@ -56,6 +56,9 @@ using grpc::testing::GaugeResponse; using grpc::testing::MetricsService; using grpc::testing::MetricsServiceImpl; +// Do not log anything +void BlackholeLogger(gpr_log_func_args* args) {} + // Prints the values of all Gauges (unless total_only is set to 'true' in which // case this only prints the sum of all gauge values). bool PrintMetrics(std::unique_ptr<MetricsService::Stub> stub, bool total_only, @@ -76,21 +79,21 @@ bool PrintMetrics(std::unique_ptr<MetricsService::Stub> stub, bool total_only, while (reader->Read(&gauge_response)) { if (gauge_response.value_case() == GaugeResponse::kLongValue) { if (!total_only) { - gpr_log(GPR_INFO, "%s: %lld", gauge_response.name().c_str(), - gauge_response.long_value()); + std::cout << gauge_response.name() << ": " + << gauge_response.long_value() << std::endl; } overall_qps += gauge_response.long_value(); } else { - gpr_log(GPR_INFO, "Gauge %s is not a long value", - gauge_response.name().c_str()); + std::cout << "Gauge '" << gauge_response.name() << "' is not long valued" + << std::endl; } } - gpr_log(GPR_INFO, "%ld", overall_qps); + std::cout << overall_qps << std::endl; const grpc::Status status = reader->Finish(); if (!status.ok()) { - gpr_log(GPR_ERROR, "Error in getting metrics from the client"); + std::cout << "Error in getting metrics from the client" << std::endl; } return status.ok(); @@ -99,14 +102,10 @@ bool PrintMetrics(std::unique_ptr<MetricsService::Stub> stub, bool total_only, int main(int argc, char** argv) { grpc::testing::InitTest(&argc, &argv, true); - // Make sure server_addresses flag is not empty - if (FLAGS_metrics_server_address.empty()) { - gpr_log( - GPR_ERROR, - "Cannot connect to the Metrics server. Please pass the address of the" - "metrics server to connect to via the 'metrics_server_address' flag"); - return 1; - } + // The output of metrics client is in some cases programmatically parsed (for + // example by the stress test framework). So, we do not want any of the logs + // from the grpc library appearing on stdout. 
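
The status_conversion_test.c changes above show that grpc_chttp2_http2_error_to_grpc_status now takes the call deadline: an HTTP/2 CANCEL observed after the deadline has passed is reported as GRPC_STATUS_DEADLINE_EXCEEDED instead of GRPC_STATUS_CANCELLED, every other mapping stays the same, and in the reverse direction GRPC_STATUS_DEADLINE_EXCEEDED now maps to GRPC_CHTTP2_CANCEL. The sketch below is only an illustration of that deadline check, inferred from the before_deadline/after_deadline expectations in the test; it is not the library's implementation and the function name is made up.

#include <grpc/status.h>
#include <grpc/support/time.h>

/* Hypothetical sketch: decide what a received CANCEL means, given the
 * call deadline. Matches the test expectations above: deadline already
 * passed -> DEADLINE_EXCEEDED, otherwise -> CANCELLED. */
static grpc_status_code http2_cancel_to_status(gpr_timespec deadline) {
  gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
  if (gpr_time_cmp(now, deadline) >= 0) {
    return GRPC_STATUS_DEADLINE_EXCEEDED; /* the "after_deadline" cases */
  }
  return GRPC_STATUS_CANCELLED; /* the "before_deadline" cases */
}
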
+ gpr_set_log_function(BlackholeLogger); std::shared_ptr<grpc::Channel> channel(grpc::CreateChannel( FLAGS_metrics_server_address, grpc::InsecureChannelCredentials())); diff --git a/third_party/protobuf b/third_party/protobuf -Subproject 3470b6895aa659b7559ed678e029a5338e535f1 +Subproject bdeb215cab2985195325fcd5e70c3fa751f46e0 diff --git a/tools/distrib/python/grpcio_tools/grpc_version.py b/tools/distrib/python/grpcio_tools/grpc_version.py index 4b1e7fcd58..79c40717dd 100644 --- a/tools/distrib/python/grpcio_tools/grpc_version.py +++ b/tools/distrib/python/grpcio_tools/grpc_version.py @@ -29,4 +29,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!! -VERSION='0.16.0.dev0' +VERSION='1.1.0.dev0' diff --git a/tools/distrib/python/grpcio_tools/setup.py b/tools/distrib/python/grpcio_tools/setup.py index afb6063906..d804f34fc6 100644 --- a/tools/distrib/python/grpcio_tools/setup.py +++ b/tools/distrib/python/grpcio_tools/setup.py @@ -32,6 +32,7 @@ import errno import os import os.path import pkg_resources +import platform import shlex import shutil import sys @@ -45,6 +46,9 @@ from setuptools.command import build_ext os.chdir(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, os.path.abspath('.')) +import protoc_lib_deps +import grpc_version + PY3 = sys.version_info.major == 3 # There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are @@ -60,8 +64,9 @@ EXTRA_LINK_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_LDFLAGS', GRPC_PYTHON_TOOLS_PACKAGE = 'grpc.tools' GRPC_PYTHON_PROTO_RESOURCES_NAME = '_proto' -import protoc_lib_deps -import grpc_version +DEFINE_MACROS = (('HAVE_PTHREAD', 1),) +if "win32" in sys.platform and '64bit' in platform.architecture()[0]: + DEFINE_MACROS += (('MS_WIN64', 1),) # By default, Python3 distutils enforces compatibility of # c plugins (.so files) with the OSX version Python3 was built with. @@ -108,9 +113,9 @@ def protoc_ext_module(): protoc_lib_deps.CC_INCLUDE, ], language='c++', - define_macros=[('HAVE_PTHREAD', 1)], - extra_compile_args=EXTRA_COMPILE_ARGS, - extra_link_args=EXTRA_LINK_ARGS, + define_macros=list(DEFINE_MACROS), + extra_compile_args=list(EXTRA_COMPILE_ARGS), + extra_link_args=list(EXTRA_LINK_ARGS), ) return plugin_ext @@ -129,7 +134,7 @@ setuptools.setup( namespace_packages=['grpc'], install_requires=[ 'protobuf>=3.0.0a3', - 'grpcio>=0.14.0', + 'grpcio>=0.15.0', ], package_data=package_data(), ) diff --git a/tools/doxygen/Doxyfile.c++ b/tools/doxygen/Doxyfile.c++ index de7acd7777..a2415e1217 100644 --- a/tools/doxygen/Doxyfile.c++ +++ b/tools/doxygen/Doxyfile.c++ @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC C++" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 0.16.0-dev +PROJECT_NUMBER = 1.1.0-dev # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index 76bb3b6c59..945298b964 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC C++" # could be handy for archiving the generated documentation or if some version # control system is used. 
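
The metrics_client.cc change above routes the client's own results through std::cout and installs a no-op gpr log function so that gRPC library logging cannot leak into output that is parsed programmatically (for example by the stress test framework). A minimal standalone sketch of that pattern, using only the public gpr logging API, follows; BlackholeLogger is the name used in the patch, everything else is illustrative.

#include <stdio.h>
#include <grpc/support/log.h>

/* Swallow all gRPC library log output so stdout stays machine-parseable. */
static void BlackholeLogger(gpr_log_func_args *args) { (void)args; }

int main(void) {
  gpr_set_log_function(BlackholeLogger);
  /* Library-style logging is now discarded... */
  gpr_log(GPR_INFO, "this message goes nowhere");
  /* ...while the program's own output still reaches stdout. */
  printf("42\n");
  return 0;
}
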
-PROJECT_NUMBER = 0.16.0-dev +PROJECT_NUMBER = 1.1.0-dev # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/tools/doxygen/Doxyfile.c++.internal.orig b/tools/doxygen/Doxyfile.c++.internal.orig deleted file mode 100644 index c214b3d3c8..0000000000 --- a/tools/doxygen/Doxyfile.c++.internal.orig +++ /dev/null @@ -1,2505 +0,0 @@ - - -# Doxyfile 1.8.9.1 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed in -# front of the TAG it is preceding. -# -# All text after a single hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists, items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (\" \"). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all text -# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv -# for the list of possible encodings. -# The default value is: UTF-8. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by -# double-quotes, unless you are using Doxywizard) that should identify the -# project for which the documentation is generated. This name is used in the -# title of most generated pages and in a few other places. -# The default value is: My Project. - -PROJECT_NAME = "GRPC C++" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. This -# could be handy for archiving the generated documentation or if some version -# control system is used. - -PROJECT_NUMBER = 0.15.0-dev - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer a -# quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify a logo or an icon that is included -# in the documentation. The maximum height of the logo should not exceed 55 -# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy -# the logo to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path -# into which the generated documentation will be written. If a relative path is -# entered, it will be relative to the location where doxygen was started. If -# left blank the current directory will be used. - -OUTPUT_DIRECTORY = doc/ref/c++.internal - -# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. Enabling this -# option can be useful when feeding doxygen a huge amount of source files, where -# putting all generated files in the same directory would otherwise causes -# performance problems for the file system. -# The default value is: NO. 
- -CREATE_SUBDIRS = NO - -# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII -# characters to appear in the names of generated files. If set to NO, non-ASCII -# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode -# U+3044. -# The default value is: NO. - -ALLOW_UNICODE_NAMES = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. -# The default value is: English. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member -# descriptions after the members that are listed in the file and class -# documentation (similar to Javadoc). Set to NO to disable this. -# The default value is: YES. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief -# description of a member or function before the detailed description -# -# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. -# The default value is: YES. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. Otherwise, the brief description is used as-is. If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. - -ABBREVIATE_BRIEF = - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = YES - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. 
-# If left blank the directory from which doxygen is run is used as the path to -# strip. -# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. -# This tag requires that the tag FULL_PATH_NAMES is set to YES. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = YES - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. The new default is -# to treat a multi-line C++ comment block as a detailed description. Set this -# tag to YES if you prefer the old behavior instead. -# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new -# page for each member. If set to NO, the documentation of a member will be part -# of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. - -TAB_SIZE = 2 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:\n" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines. - -ALIASES = - -# This tag can be used to specify a number of word-keyword mappings (TCL only). 
-# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_FOR_C = YES - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. -# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: -# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: -# Fortran. In the later case the parser tries to guess whether the code is fixed -# or free formatted code, this is the default for Fortran type files), VHDL. For -# instance to make doxygen treat .inc files as Fortran files (default is PHP), -# and .f files as C (default is Fortran), use: inc=Fortran f=C. -# -# Note: For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. - -EXTENSION_MAPPING = - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you can -# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in -# case of backward compatibilities issues. -# The default value is: YES. - -MARKDOWN_SUPPORT = YES - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by putting a % sign in front of the word or -# globally by setting AUTOLINK_SUPPORT to NO. -# The default value is: YES. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should set this -# tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); -# versus func(std::string) {}). 
This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. -# The default value is: NO. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. -# The default value is: NO. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen -# will parse them like normal C++ but will assume all classes use public instead -# of private inheritance when no explicit protection keyword is present. -# The default value is: NO. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES will make -# doxygen to replace the get and set methods by a property in the documentation. -# This will only work if the methods are indeed getting or setting a simple -# type. If this is not the case, or you want to show the methods anyway, you -# should set this option to NO. -# The default value is: YES. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. -# The default value is: NO. - -DISTRIBUTE_GROUP_DOC = NO - -# Set the SUBGROUPING tag to YES to allow class member groups of the same type -# (for instance a group of public functions) to be put as a subgroup of that -# type (e.g. under the Public Functions section). Set it to NO to prevent -# subgrouping. Alternatively, this can be done per class using the -# \nosubgrouping command. -# The default value is: YES. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. 
This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. - -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in -# documentation are documented, even if no documentation was available. Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will -# be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be -# included in the documentation. -# The default value is: NO. - -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO, -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. If set to YES, local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO, only methods in the interface are -# included. -# The default value is: NO. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO, these classes will be included in the various overviews. 
This option -# has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO, these declarations will be -# included in the documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO, these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. -# The default value is: system dependent. - -CASE_SENSE_NAMES = NO - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES, the -# scope will be hidden. -# The default value is: NO. - -HIDE_SCOPE_NAMES = NO - -# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will -# append additional text to a page's title, such as Class Reference. If set to -# YES the compound reference will be hidden. -# The default value is: NO. - -HIDE_COMPOUND_REFERENCE= NO - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. -# The default value is: YES. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief -# descriptions of file, namespace and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. Note that -# this will also influence the order of the classes in the class list. -# The default value is: NO. 
- -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the -# (brief and detailed) documentation of class members so that constructors and -# destructors are listed first. If set to NO the constructors will appear in the -# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. -# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief -# member documentation. -# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting -# detailed member documentation. -# The default value is: NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy -# of group names into alphabetical order. If set to NO the group names will -# appear in their defined order. -# The default value is: NO. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by -# fully-qualified names, including namespaces. If set to NO, the class list will -# be sorted only by class name, not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the alphabetical -# list. -# The default value is: NO. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper -# type resolution of all parameters of a function it will reject a match between -# the prototype and the implementation of a member function even if there is -# only one candidate or it is obvious which candidate to choose by doing a -# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still -# accept a match between prototype and implementation in such cases. -# The default value is: NO. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo -# list. This list is created by putting \todo commands in the documentation. -# The default value is: YES. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test -# list. This list is created by putting \test commands in the documentation. -# The default value is: YES. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug -# list. This list is created by putting \bug commands in the documentation. -# The default value is: YES. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) -# the deprecated list. This list is created by putting \deprecated commands in -# the documentation. -# The default value is: YES. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional documentation -# sections, marked by \if <section_label> ... \endif and \cond <section_label> -# ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the -# initial value of a variable or macro / define can have for it to appear in the -# documentation. If the initializer consists of more lines than specified here -# it will be hidden. Use a value of 0 to hide initializers completely. The -# appearance of the value of individual variables and macros / defines can be -# controlled using \showinitializer or \hideinitializer command in the -# documentation regardless of this setting. -# Minimum value: 0, maximum value: 10000, default value: 30. 
- -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES, the -# list will mention the files that were used to generate the documentation. -# The default value is: YES. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This -# will remove the Files entry from the Quick Index and from the Folder Tree View -# (if specified). -# The default value is: YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces -# page. This will remove the Namespaces entry from the Quick Index and from the -# Folder Tree View (if specified). -# The default value is: YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the -# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided -# by doxygen. Whatever the program writes to standard output is used as the file -# version. For an example see the documentation. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. You can -# optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. -# -# Note that if you run doxygen from a directory containing a file called -# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE -# tag is left empty. - -LAYOUT_FILE = - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. -# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. See also \cite for info how to create references. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. 
- -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. -# The default value is: NO. - -WARN_NO_PARAMDOC = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text. Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. Separate the files or directories with -# spaces. -# Note: If this tag is empty the current directory is searched. 
- -INPUT = include/grpc++/alarm.h \ -include/grpc++/channel.h \ -include/grpc++/client_context.h \ -include/grpc++/completion_queue.h \ -include/grpc++/create_channel.h \ -include/grpc++/generic/async_generic_service.h \ -include/grpc++/generic/generic_stub.h \ -include/grpc++/grpc++.h \ -include/grpc++/impl/call.h \ -include/grpc++/impl/client_unary_call.h \ -include/grpc++/impl/codegen/core_codegen.h \ -include/grpc++/impl/grpc_library.h \ -include/grpc++/impl/method_handler_impl.h \ -include/grpc++/impl/rpc_method.h \ -include/grpc++/impl/rpc_service_method.h \ -include/grpc++/impl/serialization_traits.h \ -include/grpc++/impl/server_builder_option.h \ -include/grpc++/impl/server_builder_plugin.h \ -include/grpc++/impl/server_initializer.h \ -include/grpc++/impl/service_type.h \ -include/grpc++/impl/sync.h \ -include/grpc++/impl/sync_cxx11.h \ -include/grpc++/impl/sync_no_cxx11.h \ -include/grpc++/impl/thd.h \ -include/grpc++/impl/thd_cxx11.h \ -include/grpc++/impl/thd_no_cxx11.h \ -include/grpc++/security/auth_context.h \ -include/grpc++/security/auth_metadata_processor.h \ -include/grpc++/security/credentials.h \ -include/grpc++/security/server_credentials.h \ -include/grpc++/server.h \ -include/grpc++/server_builder.h \ -include/grpc++/server_context.h \ -include/grpc++/support/async_stream.h \ -include/grpc++/support/async_unary_call.h \ -include/grpc++/support/byte_buffer.h \ -include/grpc++/support/channel_arguments.h \ -include/grpc++/support/config.h \ -include/grpc++/support/slice.h \ -include/grpc++/support/status.h \ -include/grpc++/support/status_code_enum.h \ -include/grpc++/support/string_ref.h \ -include/grpc++/support/stub_options.h \ -include/grpc++/support/sync_stream.h \ -include/grpc++/support/time.h \ -include/grpc++/impl/codegen/async_stream.h \ -include/grpc++/impl/codegen/async_unary_call.h \ -include/grpc++/impl/codegen/call.h \ -include/grpc++/impl/codegen/call_hook.h \ -include/grpc++/impl/codegen/channel_interface.h \ -include/grpc++/impl/codegen/client_context.h \ -include/grpc++/impl/codegen/client_unary_call.h \ -include/grpc++/impl/codegen/completion_queue.h \ -include/grpc++/impl/codegen/completion_queue_tag.h \ -include/grpc++/impl/codegen/config.h \ -include/grpc++/impl/codegen/core_codegen_interface.h \ -include/grpc++/impl/codegen/create_auth_context.h \ -include/grpc++/impl/codegen/grpc_library.h \ -include/grpc++/impl/codegen/method_handler_impl.h \ -include/grpc++/impl/codegen/rpc_method.h \ -include/grpc++/impl/codegen/rpc_service_method.h \ -include/grpc++/impl/codegen/security/auth_context.h \ -include/grpc++/impl/codegen/serialization_traits.h \ -include/grpc++/impl/codegen/server_context.h \ -include/grpc++/impl/codegen/server_interface.h \ -include/grpc++/impl/codegen/service_type.h \ -include/grpc++/impl/codegen/status.h \ -include/grpc++/impl/codegen/status_code_enum.h \ -include/grpc++/impl/codegen/string_ref.h \ -include/grpc++/impl/codegen/stub_options.h \ -include/grpc++/impl/codegen/sync.h \ -include/grpc++/impl/codegen/sync_cxx11.h \ -include/grpc++/impl/codegen/sync_no_cxx11.h \ -include/grpc++/impl/codegen/sync_stream.h \ -include/grpc++/impl/codegen/time.h \ -include/grpc/impl/codegen/byte_buffer.h \ -include/grpc/impl/codegen/byte_buffer_reader.h \ -include/grpc/impl/codegen/compression_types.h \ -include/grpc/impl/codegen/connectivity_state.h \ -include/grpc/impl/codegen/grpc_types.h \ -include/grpc/impl/codegen/propagation_bits.h \ -include/grpc/impl/codegen/status.h \ -include/grpc/impl/codegen/alloc.h \ 
-include/grpc/impl/codegen/atm.h \ -include/grpc/impl/codegen/atm_gcc_atomic.h \ -include/grpc/impl/codegen/atm_gcc_sync.h \ -include/grpc/impl/codegen/atm_windows.h \ -include/grpc/impl/codegen/log.h \ -include/grpc/impl/codegen/port_platform.h \ -include/grpc/impl/codegen/slice.h \ -include/grpc/impl/codegen/slice_buffer.h \ -include/grpc/impl/codegen/sync.h \ -include/grpc/impl/codegen/sync_generic.h \ -include/grpc/impl/codegen/sync_posix.h \ -include/grpc/impl/codegen/sync_windows.h \ -include/grpc/impl/codegen/time.h \ -<<<<<<< HEAD -include/grpc++/impl/codegen/config.h \ -include/grpc++/impl/codegen/config_protobuf.h \ -include/grpc++/support/config.h \ -include/grpc++/support/config_protobuf.h \ -include/grpc++/impl/codegen/core_codegen.h \ -======= ->>>>>>> d30d4e279c4a63effaa6e912fc00bd4ad96054c7 -src/cpp/client/secure_credentials.h \ -src/cpp/common/secure_auth_context.h \ -src/cpp/server/secure_server_credentials.h \ -src/cpp/client/create_channel_internal.h \ -src/cpp/server/dynamic_thread_pool.h \ -src/cpp/server/thread_pool_interface.h \ -src/cpp/client/secure_credentials.cc \ -src/cpp/common/auth_property_iterator.cc \ -src/cpp/common/secure_auth_context.cc \ -src/cpp/common/secure_channel_arguments.cc \ -src/cpp/common/secure_create_auth_context.cc \ -src/cpp/server/secure_server_credentials.cc \ -src/cpp/client/channel.cc \ -src/cpp/client/client_context.cc \ -src/cpp/client/create_channel.cc \ -src/cpp/client/create_channel_internal.cc \ -src/cpp/client/credentials.cc \ -src/cpp/client/generic_stub.cc \ -src/cpp/client/insecure_credentials.cc \ -src/cpp/common/channel_arguments.cc \ -src/cpp/common/completion_queue.cc \ -src/cpp/common/core_codegen.cc \ -src/cpp/common/rpc_method.cc \ -src/cpp/server/async_generic_service.cc \ -src/cpp/server/create_default_thread_pool.cc \ -src/cpp/server/dynamic_thread_pool.cc \ -src/cpp/server/insecure_server_credentials.cc \ -src/cpp/server/server.cc \ -src/cpp/server/server_builder.cc \ -src/cpp/server/server_context.cc \ -src/cpp/server/server_credentials.cc \ -src/cpp/util/byte_buffer.cc \ -src/cpp/util/slice.cc \ -src/cpp/util/status.cc \ -src/cpp/util/string_ref.cc \ -src/cpp/util/time.cc \ -src/cpp/codegen/codegen_init.cc - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: http://www.gnu.org/software/libiconv) for the list of -# possible encodings. -# The default value is: UTF-8. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank the -# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, -# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, -# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, -# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, -# *.qsf, *.as and *.js. - -FILE_PATTERNS = - -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. - -RECURSIVE = NO - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. 
This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude commands -# irrespective of the value of the RECURSIVE tag. -# The default value is: NO. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or directories -# that contain images that are to be included in the documentation (see the -# \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command: -# -# <filter> <input-file> -# -# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the -# name of an input file. Doxygen will then use the output that the filter -# program writes to standard output. If FILTER_PATTERNS is specified, this tag -# will be ignored. -# -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. If lines are added -# or removed, the anchors will not be placed correctly. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: pattern=filter -# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how -# filters are used. If the FILTER_PATTERNS tag is empty or if none of the -# patterns match the file name, INPUT_FILTER is applied. 
- -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will also be used to filter the input files that are used for -# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). -# The default value is: NO. - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and -# it is also possible to disable source filtering for a specific pattern using -# *.ext= (so without naming a filter). -# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. - -FILTER_SOURCE_PATTERNS = - -# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on for instance GitHub -# and want to reuse the introduction page also for the doxygen output. - -USE_MDFILE_AS_MAINPAGE = - -#--------------------------------------------------------------------------- -# Configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will be -# generated. Documented entities will be cross-referenced with these sources. -# -# Note: To get rid of all source code in the generated output, make sure that -# also VERBATIM_HEADERS is set to NO. -# The default value is: NO. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body of functions, -# classes and enums directly into the documentation. -# The default value is: NO. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any -# special comment blocks from generated source code fragments. Normal C, C++ and -# Fortran comments will always remain visible. -# The default value is: YES. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# function all documented functions referencing it will be listed. -# The default value is: NO. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES then for each documented function -# all documented entities called/used by that function will be listed. -# The default value is: NO. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES then the hyperlinks from functions in REFERENCES_RELATION and -# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will -# link to the documentation. -# The default value is: YES. - -REFERENCES_LINK_SOURCE = YES - -# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the -# source code will show a tooltip with additional information such as prototype, -# brief description and links to the definition and documentation. Since this -# will make the HTML file larger and loading of large files a bit slower, you -# can opt to disable this feature. -# The default value is: YES. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -SOURCE_TOOLTIPS = YES - -# If the USE_HTAGS tag is set to YES then the references to source code will -# point to the HTML generated by the htags(1) tool instead of doxygen built-in -# source browser. The htags tool is part of GNU's global source tagging system -# (see http://www.gnu.org/software/global/global.html). 
You will need version -# 4.8.6 or higher. -# -# To use it do the following: -# - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the config file -# - Make sure the INPUT points to the root of the source tree -# - Run doxygen as normal -# -# Doxygen will invoke htags (and that will in turn invoke gtags), so these -# tools must be available from the command line (i.e. in the search path). -# -# The result: instead of the source browser generated by doxygen, the links to -# source code will now point to the output of htags. -# The default value is: NO. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a -# verbatim copy of the header file for each class for which an include is -# specified. Set to NO to disable this. -# See also: Section \class. -# The default value is: YES. - -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# Configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all -# compounds will be generated. Enable this if the project contains a lot of -# classes, structs, unions or interfaces. -# The default value is: YES. - -ALPHABETICAL_INDEX = YES - -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output -# The default value is: YES. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each -# generated HTML page (for example: .htm, .php, .asp). -# The default value is: .html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a user-defined HTML header file for -# each generated HTML page. If the tag is left blank doxygen will generate a -# standard header. -# -# To get valid HTML the header file that includes any scripts and style sheets -# that doxygen needs, which is dependent on the configuration options used (e.g. -# the setting GENERATE_TREEVIEW). 
It is highly recommended to start with a -# default header using -# doxygen -w html new_header.html new_footer.html new_stylesheet.css -# YourConfigFile -# and then modify the file new_header.html. See also section "Doxygen usage" -# for information on how to generate the default header that doxygen normally -# uses. -# Note: The header is subject to change so you typically have to regenerate the -# default header when upgrading to a newer version of doxygen. For a description -# of the possible markers and block names see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each -# generated HTML page. If the tag is left blank doxygen will generate a standard -# footer. See HTML_HEADER for more information on how to generate a default -# footer and what special commands can be used inside the footer. See also -# section "Doxygen usage" for information on how to generate the default footer -# that doxygen normally uses. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style -# sheet that is used by each HTML page. It can be used to fine-tune the look of -# the HTML output. If left blank doxygen will generate a default style sheet. -# See also section "Doxygen usage" for information on how to generate the style -# sheet that doxygen normally uses. -# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as -# it is more robust and this tag (HTML_STYLESHEET) will in the future become -# obsolete. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined -# cascading style sheets that are included after the standard style sheets -# created by doxygen. Using this option one can overrule certain style aspects. -# This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefore more robust against future updates. -# Doxygen will copy the style sheet files to the output directory. -# Note: The order of the extra style sheet files is of importance (e.g. the last -# style sheet in the list overrules the setting of the previous ones in the -# list). For an example see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_STYLESHEET = - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that the -# files will be copied as-is; there are no commands or markers available. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the style sheet and background images according to -# this color. Hue is specified as an angle on a colorwheel, see -# http://en.wikipedia.org/wiki/Hue for more information. For instance the value -# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 -# purple, and 360 is red again. -# Minimum value: 0, maximum value: 359, default value: 220. 
-# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A -# value of 255 will produce the most vivid colors. -# Minimum value: 0, maximum value: 255, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the -# luminance component of the colors in the HTML output. Values below 100 -# gradually make the output lighter, whereas values above 100 make the output -# darker. The value divided by 100 is the actual gamma applied, so 80 represents -# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not -# change the gamma. -# Minimum value: 40, maximum value: 240, default value: 80. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting this -# to NO can help when comparing the output of multiple runs. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_TIMESTAMP = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries -# shown in the various tree structured indices initially; the user can expand -# and collapse entries dynamically later on. Doxygen will expand the tree to -# such a level that at most the specified number of entries are visible (unless -# a fully collapsed tree already exceeds this amount). So setting the number of -# entries 1 will produce a full collapsed tree by default. 0 is a special value -# representing an infinite number of entries and will result in a full expanded -# tree by default. -# Minimum value: 0, maximum value: 9999, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files will be -# generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: http://developer.apple.com/tools/xcode/), introduced with -# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_DOCSET = NO - -# This tag determines the name of the docset feed. A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# The default value is: Doxygen generated docs. -# This tag requires that the tag GENERATE_DOCSET is set to YES. 
- -DOCSET_FEEDNAME = "Doxygen generated docs" - -# This tag specifies a string that should uniquely identify the documentation -# set bundle. This should be a reverse domain-name style string, e.g. -# com.mycompany.MyDocSet. Doxygen will append .docset to the name. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. -# The default value is: org.doxygen.Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. -# The default value is: Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three -# additional HTML index files: index.hhp, index.hhc, and index.hhk. The -# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. -# -# The HTML Help Workshop contains a compiler that can convert all HTML output -# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML -# files are now used as the Windows 98 help format, and will replace the old -# Windows help format (.hlp) on all Windows platforms in the future. Compressed -# HTML files also contain an index, a table of contents, and you can search for -# words in the documentation. The HTML workshop also contains a viewer for -# compressed HTML files. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_HTMLHELP = NO - -# The CHM_FILE tag can be used to specify the file name of the resulting .chm -# file. You can add a path in front of the file if the result should not be -# written to the html output directory. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_FILE = - -# The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler (hhc.exe). If non-empty, -# doxygen will try to run the HTML help compiler on the generated index.hhp. -# The file has to be specified with full path. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -HHC_LOCATION = - -# The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the master .chm file (NO). -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -GENERATE_CHI = NO - -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) -# and project file content. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_INDEX_ENCODING = - -# The BINARY_TOC flag controls whether a binary table of contents is generated -# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it -# enables the Previous and Next buttons. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members to -# the table of contents of the HTML help documentation and to the tree view. 
-# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that -# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help -# (.qch) of the generated HTML documentation. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify -# the file name of the resulting .qch file. The path specified is relative to -# the HTML output folder. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help -# Project output. For more information please see Qt Help Project / Namespace -# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt -# Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- -# folders). -# The default value is: doc. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_VIRTUAL_FOLDER = doc - -# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom -# filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's filter section matches. Qt Help Project / Filter Attributes (see: -# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_SECT_FILTER_ATTRS = - -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be -# generated, together with the HTML files, they form an Eclipse help plugin. To -# install this plugin and make it available under the help contents menu in -# Eclipse, the contents of the directory containing the HTML and XML files needs -# to be copied into the plugins directory of eclipse. The name of the directory -# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. -# After copying Eclipse needs to be restarted before the help appears. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the Eclipse help plugin. 
When installing the plugin -# the directory name containing the HTML and XML files should also have this -# name. Each documentation set should have its own identifier. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# If you want full control over the layout of the generated HTML pages it might -# be necessary to disable the index and replace it with your own. The -# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top -# of each HTML page. A value of NO enables the index and the value YES disables -# it. Since the tabs in the index contain the same information as the navigation -# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. If the tag -# value is set to YES, a side panel will be generated containing a tree-like -# index structure (just like the one that is generated for HTML Help). For this -# to work a browser that supports JavaScript, DHTML, CSS and frames is required -# (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_TREEVIEW = NO - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that -# doxygen will group on one line in the generated HTML documentation. -# -# Note that a value of 0 will completely suppress the enum values from appearing -# in the overview section. -# Minimum value: 0, maximum value: 20, default value: 4. -# This tag requires that the tag GENERATE_HTML is set to YES. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used -# to set the initial width (in pixels) of the frame in which the tree is shown. -# Minimum value: 0, maximum value: 1500, default value: 250. -# This tag requires that the tag GENERATE_HTML is set to YES. - -TREEVIEW_WIDTH = 250 - -# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to -# external symbols imported via tag files in a separate window. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of LaTeX formulas included as images in -# the HTML documentation. When you change the font size after a successful -# doxygen run you need to manually remove any form_*.png images from the HTML -# output directory to force them to be regenerated. -# Minimum value: 8, maximum value: 50, default value: 10. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. 
Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_TRANSPARENT = YES - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# http://www.mathjax.org) which uses client side Javascript for the rendering -# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX -# installed or if you want the formulas to look prettier in the HTML output. When -# enabled you may also need to install MathJax separately and configure the path -# to it using the MATHJAX_RELPATH option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -USE_MATHJAX = NO - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. -# Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. -# The default value is: HTML-CSS. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the HTML -# output directory using the MATHJAX_RELPATH option. The destination directory -# should contain the MathJax.js script. For instance, if the mathjax directory -# is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax -# Content Delivery Network so you can quickly see the result without installing -# MathJax. However, it is strongly recommended to install a local copy of -# MathJax from http://www.mathjax.org before deployment. -# The default value is: http://cdn.mathjax.org/mathjax/latest. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest - -# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax -# extension names that should be enabled during MathJax rendering. For example -# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces -# of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an -# example see the documentation. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box for -# the HTML output. The underlying search engine uses javascript and DHTML and -# should work on any modern browser. Note that when using HTML help -# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) -# there is already a search function so this one should typically be disabled. -# For large projects the javascript based search engine can be slow, then -# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to -# search using the keyboard; to jump to the search box use <access key> + S -# (what the <access key> is depends on the OS and browser, but it is typically -# <CTRL>, <ALT>/<option>, or both). 
Inside the search box use the <cursor down -# key> to jump into the search results window, the results can be navigated -# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel -# the search. The filter options can be selected when the cursor is inside the -# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys> -# to select a filter and <Enter> or <escape> to activate or cancel the filter -# option. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -SEARCHENGINE = YES - -# When the SERVER_BASED_SEARCH tag is enabled the search engine will be -# implemented using a web server instead of a web client using Javascript. There -# are two flavors of web server based searching depending on the EXTERNAL_SEARCH -# setting. When disabled, doxygen will generate a PHP script for searching and -# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing -# and searching needs to be provided by external tools. See the section -# "External Indexing and Searching" for details. -# The default value is: NO. -# This tag requires that the tag SEARCHENGINE is set to YES. - -SERVER_BASED_SEARCH = NO - -# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP -# script for searching. Instead the search results are written to an XML file -# which needs to be processed by an external indexer. Doxygen will invoke an -# external search engine pointed to by the SEARCHENGINE_URL option to obtain the -# search results. -# -# Doxygen ships with an example indexer (doxyindexer) and search engine -# (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: http://xapian.org/). -# -# See the section "External Indexing and Searching" for details. -# The default value is: NO. -# This tag requires that the tag SEARCHENGINE is set to YES. - -EXTERNAL_SEARCH = NO - -# The SEARCHENGINE_URL should point to a search engine hosted by a web server -# which will return the search results when EXTERNAL_SEARCH is enabled. -# -# Doxygen ships with an example indexer (doxyindexer) and search engine -# (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: http://xapian.org/). See the section "External Indexing and -# Searching" for details. -# This tag requires that the tag SEARCHENGINE is set to YES. - -SEARCHENGINE_URL = - -# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed -# search data is written to a file for indexing by an external tool. With the -# SEARCHDATA_FILE tag the name of this file can be specified. -# The default file is: searchdata.xml. -# This tag requires that the tag SEARCHENGINE is set to YES. - -SEARCHDATA_FILE = searchdata.xml - -# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the -# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is -# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple -# projects and redirect the results back to the right project. -# This tag requires that the tag SEARCHENGINE is set to YES. - -EXTERNAL_SEARCH_ID = - -# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen -# projects other than the one defined by this configuration file, but that are -# all added to the same external search index. Each project needs to have a -# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of -# to a relative location where the documentation can be found. 
The format is: -# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ... -# This tag requires that the tag SEARCHENGINE is set to YES. - -EXTRA_SEARCH_MAPPINGS = - -#--------------------------------------------------------------------------- -# Configuration options related to the LaTeX output -#--------------------------------------------------------------------------- - -# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output. -# The default value is: YES. - -GENERATE_LATEX = NO - -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: latex. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_OUTPUT = latex - -# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be -# invoked. -# -# Note that when enabling USE_PDFLATEX this option is only used for generating -# bitmaps for formulas in the HTML output, but not in the Makefile that is -# written to the output directory. -# The default file is: latex. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_CMD_NAME = latex - -# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate -# index for LaTeX. -# The default file is: makeindex. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -MAKEINDEX_CMD_NAME = makeindex - -# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX -# documents. This may be useful for small projects and may help to save some -# trees in general. -# The default value is: NO. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -COMPACT_LATEX = NO - -# The PAPER_TYPE tag can be used to set the paper type that is used by the -# printer. -# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x -# 14 inches) and executive (7.25 x 10.5 inches). -# The default value is: a4. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -PAPER_TYPE = a4 - -# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names -# that should be included in the LaTeX output. To get the times font for -# instance you can specify -# EXTRA_PACKAGES=times -# If left blank no extra packages will be included. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -EXTRA_PACKAGES = - -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the -# generated LaTeX document. The header should contain everything until the first -# chapter. If it is left blank doxygen will generate a standard header. See -# section "Doxygen usage" for information on how to let doxygen write the -# default header to a separate file. -# -# Note: Only use a user-defined header if you know what you are doing! The -# following commands have a special meaning inside the header: $title, -# $datetime, $date, $doxygenversion, $projectname, $projectnumber, -# $projectbrief, $projectlogo. Doxygen will replace $title with the empty -# string, for the replacement values of the other commands the user is referred -# to HTML_HEADER. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_HEADER = - -# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the -# generated LaTeX document. The footer should contain everything after the last -# chapter. If it is left blank doxygen will generate a standard footer. 
See -# LATEX_HEADER for more information on how to generate a default footer and what -# special commands can be used inside the footer. -# -# Note: Only use a user-defined footer if you know what you are doing! -# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_FOOTER = - -# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined -# LaTeX style sheets that are included after the standard style sheets created -# by doxygen. Using this option one can overrule certain style aspects. Doxygen -# will copy the style sheet files to the output directory. -# Note: The order of the extra style sheet files is of importance (e.g. the last -# style sheet in the list overrules the setting of the previous ones in the -# list). -# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_EXTRA_STYLESHEET = - -# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the LATEX_OUTPUT output -# directory. Note that the files will be copied as-is; there are no commands or -# markers available. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_EXTRA_FILES = - -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is -# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will -# contain links (just like the HTML output) instead of page references. This -# makes the output suitable for online browsing using a PDF viewer. -# The default value is: YES. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -PDF_HYPERLINKS = YES - -# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate -# the PDF file directly from the LaTeX files. Set this option to YES, to get a -# higher quality PDF documentation. -# The default value is: YES. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -USE_PDFLATEX = YES - -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode -# command to the generated LaTeX files. This will instruct LaTeX to keep running -# if errors occur, instead of asking the user for help. This option is also used -# when generating formulas in HTML. -# The default value is: NO. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_BATCHMODE = NO - -# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the -# index chapters (such as File Index, Compound Index, etc.) in the output. -# The default value is: NO. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_HIDE_INDICES = NO - -# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source -# code with syntax highlighting in the LaTeX output. -# -# Note that which sources are shown also depends on other settings such as -# SOURCE_BROWSER. -# The default value is: NO. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_SOURCE_CODE = NO - -# The LATEX_BIB_STYLE tag can be used to specify the style to use for the -# bibliography, e.g. plainnat, or ieeetr. See -# http://en.wikipedia.org/wiki/BibTeX and \cite for more info. -# The default value is: plain. -# This tag requires that the tag GENERATE_LATEX is set to YES. 
- -LATEX_BIB_STYLE = plain - -#--------------------------------------------------------------------------- -# Configuration options related to the RTF output -#--------------------------------------------------------------------------- - -# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The -# RTF output is optimized for Word 97 and may not look too pretty with other RTF -# readers/editors. -# The default value is: NO. - -GENERATE_RTF = NO - -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: rtf. -# This tag requires that the tag GENERATE_RTF is set to YES. - -RTF_OUTPUT = rtf - -# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF -# documents. This may be useful for small projects and may help to save some -# trees in general. -# The default value is: NO. -# This tag requires that the tag GENERATE_RTF is set to YES. - -COMPACT_RTF = NO - -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will -# contain hyperlink fields. The RTF file will contain links (just like the HTML -# output) instead of page references. This makes the output suitable for online -# browsing using Word or some other Word compatible readers that support those -# fields. -# -# Note: WordPad (write) and others do not support links. -# The default value is: NO. -# This tag requires that the tag GENERATE_RTF is set to YES. - -RTF_HYPERLINKS = NO - -# Load stylesheet definitions from file. Syntax is similar to doxygen's config -# file, i.e. a series of assignments. You only have to provide replacements, -# missing definitions are set to their default value. -# -# See also section "Doxygen usage" for information on how to generate the -# default style sheet that doxygen normally uses. -# This tag requires that the tag GENERATE_RTF is set to YES. - -RTF_STYLESHEET_FILE = - -# Set optional variables used in the generation of an RTF document. Syntax is -# similar to doxygen's config file. A template extensions file can be generated -# using doxygen -e rtf extensionFile. -# This tag requires that the tag GENERATE_RTF is set to YES. - -RTF_EXTENSIONS_FILE = - -# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code -# with syntax highlighting in the RTF output. -# -# Note that which sources are shown also depends on other settings such as -# SOURCE_BROWSER. -# The default value is: NO. -# This tag requires that the tag GENERATE_RTF is set to YES. - -RTF_SOURCE_CODE = NO - -#--------------------------------------------------------------------------- -# Configuration options related to the man page output -#--------------------------------------------------------------------------- - -# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for -# classes and files. -# The default value is: NO. - -GENERATE_MAN = NO - -# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. A directory man3 will be created inside the directory specified by -# MAN_OUTPUT. -# The default directory is: man. -# This tag requires that the tag GENERATE_MAN is set to YES. - -MAN_OUTPUT = man - -# The MAN_EXTENSION tag determines the extension that is added to the generated -# man pages. In case the manual section does not start with a number, the number -# 3 is prepended. The dot (.) 
at the beginning of the MAN_EXTENSION tag is -# optional. -# The default value is: .3. -# This tag requires that the tag GENERATE_MAN is set to YES. - -MAN_EXTENSION = .3 - -# The MAN_SUBDIR tag determines the name of the directory created within -# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by -# MAN_EXTENSION with the initial . removed. -# This tag requires that the tag GENERATE_MAN is set to YES. - -MAN_SUBDIR = - -# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it -# will generate one additional man file for each entity documented in the real -# man page(s). These additional files only source the real man page, but without -# them the man command would be unable to find the correct page. -# The default value is: NO. -# This tag requires that the tag GENERATE_MAN is set to YES. - -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# Configuration options related to the XML output -#--------------------------------------------------------------------------- - -# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that -# captures the structure of the code including all documentation. -# The default value is: NO. - -GENERATE_XML = NO - -# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: xml. -# This tag requires that the tag GENERATE_XML is set to YES. - -XML_OUTPUT = xml - -# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program -# listings (including syntax highlighting and cross-referencing information) to -# the XML output. Note that enabling this will significantly increase the size -# of the XML output. -# The default value is: YES. -# This tag requires that the tag GENERATE_XML is set to YES. - -XML_PROGRAMLISTING = YES - -#--------------------------------------------------------------------------- -# Configuration options related to the DOCBOOK output -#--------------------------------------------------------------------------- - -# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files -# that can be used to generate PDF. -# The default value is: NO. - -GENERATE_DOCBOOK = NO - -# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in -# front of it. -# The default directory is: docbook. -# This tag requires that the tag GENERATE_DOCBOOK is set to YES. - -DOCBOOK_OUTPUT = docbook - -# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the -# program listings (including syntax highlighting and cross-referencing -# information) to the DOCBOOK output. Note that enabling this will significantly -# increase the size of the DOCBOOK output. -# The default value is: NO. -# This tag requires that the tag GENERATE_DOCBOOK is set to YES. - -DOCBOOK_PROGRAMLISTING = NO - -#--------------------------------------------------------------------------- -# Configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- - -# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an -# AutoGen Definitions (see http://autogen.sf.net) file that captures the -# structure of the code including all documentation. Note that this feature is -# still experimental and incomplete at the moment. 
-# The default value is: NO. - -GENERATE_AUTOGEN_DEF = NO - -#--------------------------------------------------------------------------- -# Configuration options related to the Perl module output -#--------------------------------------------------------------------------- - -# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module -# file that captures the structure of the code including all documentation. -# -# Note that this feature is still experimental and incomplete at the moment. -# The default value is: NO. - -GENERATE_PERLMOD = NO - -# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary -# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI -# output from the Perl module output. -# The default value is: NO. -# This tag requires that the tag GENERATE_PERLMOD is set to YES. - -PERLMOD_LATEX = NO - -# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely -# formatted so it can be parsed by a human reader. This is useful if you want to -# understand what is going on. On the other hand, if this tag is set to NO, the -# size of the Perl module output will be much smaller and Perl will parse it -# just the same. -# The default value is: YES. -# This tag requires that the tag GENERATE_PERLMOD is set to YES. - -PERLMOD_PRETTY = YES - -# The names of the make variables in the generated doxyrules.make file are -# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful -# so different doxyrules.make files included by the same Makefile don't -# overwrite each other's variables. -# This tag requires that the tag GENERATE_PERLMOD is set to YES. - -PERLMOD_MAKEVAR_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- - -# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all -# C-preprocessor directives found in the sources and include files. -# The default value is: YES. - -ENABLE_PREPROCESSING = YES - -# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names -# in the source code. If set to NO, only conditional compilation will be -# performed. Macro expansion can be done in a controlled way by setting -# EXPAND_ONLY_PREDEF to YES. -# The default value is: NO. -# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. - -MACRO_EXPANSION = YES - -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then -# the macro expansion is limited to the macros specified with the PREDEFINED and -# EXPAND_AS_DEFINED tags. -# The default value is: NO. -# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. - -EXPAND_ONLY_PREDEF = NO - -# If the SEARCH_INCLUDES tag is set to YES, the include files in the -# INCLUDE_PATH will be searched if a #include is found. -# The default value is: YES. -# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. - -SEARCH_INCLUDES = YES - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by the -# preprocessor. -# This tag requires that the tag SEARCH_INCLUDES is set to YES. - -INCLUDE_PATH = - -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. 
If left blank, the patterns specified with FILE_PATTERNS will be -# used. -# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. - -INCLUDE_FILE_PATTERNS = - -# The PREDEFINED tag can be used to specify one or more macro names that are -# defined before the preprocessor is started (similar to the -D option of e.g. -# gcc). The argument of the tag is a list of macros of the form: name or -# name=definition (no spaces). If the definition and the "=" are omitted, "=1" -# is assumed. To prevent a macro definition from being undefined via #undef or -# recursively expanded use the := operator instead of the = operator. -# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. - -PREDEFINED = GRPC_FINAL= GRPC_OVERIDE= - -# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this -# tag can be used to specify a list of macro names that should be expanded. The -# macro definition that is found in the sources will be used. Use the PREDEFINED -# tag if you want to use a different macro definition that overrules the -# definition found in the source code. -# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. - -EXPAND_AS_DEFINED = - -# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will -# remove all references to function-like macros that are alone on a line, have -# an all uppercase name, and do not end with a semicolon. Such function macros -# are typically used for boiler-plate code, and will confuse the parser if not -# removed. -# The default value is: YES. -# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. - -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration options related to external references -#--------------------------------------------------------------------------- - -# The TAGFILES tag can be used to specify one or more tag files. For each tag -# file the location of the external documentation should be added. The format of -# a tag file without this location is as follows: -# TAGFILES = file1 file2 ... -# Adding location for the tag files is done as follows: -# TAGFILES = file1=loc1 "file2 = loc2" ... -# where loc1 and loc2 can be relative or absolute paths or URLs. See the -# section "Linking to external documentation" for more information about the use -# of tag files. -# Note: Each tag file must have a unique name (where the name does NOT include -# the path). If a tag file is not located in the directory in which doxygen is -# run, you must also specify the path to the tagfile here. - -TAGFILES = - -# When a file name is specified after GENERATE_TAGFILE, doxygen will create a -# tag file that is based on the input files it reads. See section "Linking to -# external documentation" for more information about the usage of tag files. - -GENERATE_TAGFILE = - -# If the ALLEXTERNALS tag is set to YES, all external class will be listed in -# the class index. If set to NO, only the inherited external classes will be -# listed. -# The default value is: NO. - -ALLEXTERNALS = NO - -# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed -# in the modules index. If set to NO, only the current project's groups will be -# listed. -# The default value is: YES. - -EXTERNAL_GROUPS = YES - -# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in -# the related pages index. If set to NO, only the current project's pages will -# be listed. -# The default value is: YES. 
- -EXTERNAL_PAGES = YES - -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of 'which perl'). -# The default file (with absolute path) is: /usr/bin/perl. - -PERL_PATH = /usr/bin/perl - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- - -# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram -# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to -# NO turns the diagrams off. Note that this option also works with HAVE_DOT -# disabled, but it is recommended to install and use dot, since it yields more -# powerful graphs. -# The default value is: YES. - -CLASS_DIAGRAMS = YES - -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see: -# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - -# You can include diagrams made with dia in doxygen documentation. Doxygen will -# then run dia to produce the diagram and insert it in the documentation. The -# DIA_PATH tag allows you to specify the directory where the dia binary resides. -# If left empty dia is assumed to be found in the default search path. - -DIA_PATH = - -# If set to YES the inheritance and collaboration graphs will hide inheritance -# and usage relations if the target is undocumented or is not a class. -# The default value is: YES. - -HIDE_UNDOC_RELATIONS = YES - -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. This tool is part of Graphviz (see: -# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent -# Bell Labs. The other options in this section have no effect if this option is -# set to NO -# The default value is: NO. - -HAVE_DOT = YES - -# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed -# to run in parallel. When set to 0 doxygen will base this on the number of -# processors available in the system. You can set it explicitly to a value -# larger than 0 to get control over the balance between CPU load and processing -# speed. -# Minimum value: 0, maximum value: 32, default value: 0. -# This tag requires that the tag HAVE_DOT is set to YES. - -DOT_NUM_THREADS = 0 - -# When you want a differently looking font in the dot files that doxygen -# generates you can specify the font name using DOT_FONTNAME. You need to make -# sure dot is able to find the font, which can be done by putting it in a -# standard location or by setting the DOTFONTPATH environment variable or by -# setting DOT_FONTPATH to the directory containing the font. -# The default value is: Helvetica. -# This tag requires that the tag HAVE_DOT is set to YES. - -DOT_FONTNAME = Helvetica - -# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of -# dot graphs. -# Minimum value: 4, maximum value: 24, default value: 10. -# This tag requires that the tag HAVE_DOT is set to YES. - -DOT_FONTSIZE = 10 - -# By default doxygen will tell dot to use the default font as specified with -# DOT_FONTNAME. 
If you specify a different font using DOT_FONTNAME you can set -# the path where dot can find it using this tag. -# This tag requires that the tag HAVE_DOT is set to YES. - -DOT_FONTPATH = - -# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for -# each documented class showing the direct and indirect inheritance relations. -# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO. -# The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. - -CLASS_GRAPH = NO - -# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a -# graph for each documented class showing the direct and indirect implementation -# dependencies (inheritance, containment, and class references variables) of the -# class with other documented classes. -# The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. - -COLLABORATION_GRAPH = NO - -# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for -# groups, showing the direct groups dependencies. -# The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. - -GROUP_GRAPHS = NO - -# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and -# collaboration diagrams in a style similar to the OMG's Unified Modeling -# Language. -# The default value is: NO. -# This tag requires that the tag HAVE_DOT is set to YES. - -UML_LOOK = NO - -# If the UML_LOOK tag is enabled, the fields and methods are shown inside the -# class node. If there are many fields or methods and many nodes the graph may -# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the -# number of items for each type to make the size more manageable. Set this to 0 -# for no limit. Note that the threshold may be exceeded by 50% before the limit -# is enforced. So when you set the threshold to 10, up to 15 fields may appear, -# but if the number exceeds 15, the total amount of fields shown is limited to -# 10. -# Minimum value: 0, maximum value: 100, default value: 10. -# This tag requires that the tag HAVE_DOT is set to YES. - -UML_LIMIT_NUM_FIELDS = 10 - -# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and -# collaboration graphs will show the relations between templates and their -# instances. -# The default value is: NO. -# This tag requires that the tag HAVE_DOT is set to YES. - -TEMPLATE_RELATIONS = NO - -# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to -# YES then doxygen will generate a graph for each documented file showing the -# direct and indirect include dependencies of the file with other documented -# files. -# The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. - -INCLUDE_GRAPH = NO - -# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are -# set to YES then doxygen will generate a graph for each documented file showing -# the direct and indirect include dependencies of the file with other documented -# files. -# The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. - -INCLUDED_BY_GRAPH = NO - -# If the CALL_GRAPH tag is set to YES then doxygen will generate a call -# dependency graph for every global function or class method. -# -# Note that enabling this option will significantly increase the time of a run. -# So in most cases it will be better to enable call graphs for selected -# functions only using the \callgraph command. -# The default value is: NO. 
-# This tag requires that the tag HAVE_DOT is set to YES. - -CALL_GRAPH = NO - -# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller -# dependency graph for every global function or class method. -# -# Note that enabling this option will significantly increase the time of a run. -# So in most cases it will be better to enable caller graphs for selected -# functions only using the \callergraph command. -# The default value is: NO. -# This tag requires that the tag HAVE_DOT is set to YES. - -CALLER_GRAPH = NO - -# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical -# hierarchy of all classes instead of a textual one. -# The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. - -GRAPHICAL_HIERARCHY = NO - -# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the -# dependencies a directory has on other directories in a graphical way. The -# dependency relations are determined by the #include relations between the -# files in the directories. -# The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. - -DIRECTORY_GRAPH = NO - -# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images -# generated by dot. -# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order -# to make the SVG files visible in IE 9+ (other browsers do not have this -# requirement). -# Possible values are: png, jpg, gif and svg. -# The default value is: png. -# This tag requires that the tag HAVE_DOT is set to YES. - -DOT_IMAGE_FORMAT = png - -# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to -# enable generation of interactive SVG images that allow zooming and panning. -# -# Note that this requires a modern browser other than Internet Explorer. Tested -# and working are Firefox, Chrome, Safari, and Opera. -# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make -# the SVG files visible. Older versions of IE do not have SVG support. -# The default value is: NO. -# This tag requires that the tag HAVE_DOT is set to YES. - -INTERACTIVE_SVG = NO - -# The DOT_PATH tag can be used to specify the path where the dot tool can be -# found. If left blank, it is assumed the dot tool can be found in the path. -# This tag requires that the tag HAVE_DOT is set to YES. - -DOT_PATH = - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the \dotfile -# command). -# This tag requires that the tag HAVE_DOT is set to YES. - -DOTFILE_DIRS = - -# The MSCFILE_DIRS tag can be used to specify one or more directories that -# contain msc files that are included in the documentation (see the \mscfile -# command). - -MSCFILE_DIRS = - -# The DIAFILE_DIRS tag can be used to specify one or more directories that -# contain dia files that are included in the documentation (see the \diafile -# command). - -DIAFILE_DIRS = - -# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the -# path where java can find the plantuml.jar file. If left blank, it is assumed -# PlantUML is not used or called during a preprocessing step. Doxygen will -# generate a warning when it encounters a \startuml command in this case and -# will not generate output for the diagram. - -PLANTUML_JAR_PATH = - -# When using plantuml, the specified paths are searched for files specified by -# the !include statement in a plantuml block. 
- -PLANTUML_INCLUDE_PATH = - -# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes -# that will be shown in the graph. If the number of nodes in a graph becomes -# larger than this value, doxygen will truncate the graph, which is visualized -# by representing a node as a red box. Note that doxygen if the number of direct -# children of the root node in a graph is already larger than -# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that -# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. -# Minimum value: 0, maximum value: 10000, default value: 50. -# This tag requires that the tag HAVE_DOT is set to YES. - -DOT_GRAPH_MAX_NODES = 50 - -# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs -# generated by dot. A depth value of 3 means that only nodes reachable from the -# root by following a path via at most 3 edges will be shown. Nodes that lay -# further from the root node will be omitted. Note that setting this option to 1 -# or 2 may greatly reduce the computation time needed for large code bases. Also -# note that the size of a graph can be further restricted by -# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. -# Minimum value: 0, maximum value: 1000, default value: 0. -# This tag requires that the tag HAVE_DOT is set to YES. - -MAX_DOT_GRAPH_DEPTH = 0 - -# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent -# background. This is disabled by default, because dot on Windows does not seem -# to support this out of the box. -# -# Warning: Depending on the platform used, enabling this option may lead to -# badly anti-aliased labels on the edges of a graph (i.e. they become hard to -# read). -# The default value is: NO. -# This tag requires that the tag HAVE_DOT is set to YES. - -DOT_TRANSPARENT = NO - -# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output -# files in one run (i.e. multiple -o and -T options on the command line). This -# makes dot run faster, but since only newer versions of dot (>1.8.10) support -# this, this feature is disabled by default. -# The default value is: NO. -# This tag requires that the tag HAVE_DOT is set to YES. - -DOT_MULTI_TARGETS = NO - -# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page -# explaining the meaning of the various boxes and arrows in the dot generated -# graphs. -# The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. - -GENERATE_LEGEND = YES - -# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot -# files that are used to generate the various graphs. -# The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. - -DOT_CLEANUP = YES - diff --git a/tools/doxygen/Doxyfile.core b/tools/doxygen/Doxyfile.core index 53ae4e4cf4..e631c962b3 100644 --- a/tools/doxygen/Doxyfile.core +++ b/tools/doxygen/Doxyfile.core @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC Core" # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER = 0.16.0-dev +PROJECT_NUMBER = 1.1.0-dev # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index b846237689..8233da957d 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC Core" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 0.16.0-dev +PROJECT_NUMBER = 1.1.0-dev # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/tools/run_tests/build_artifact_python.bat b/tools/run_tests/build_artifact_python.bat index 295347e947..7c8c2aa12d 100644 --- a/tools/run_tests/build_artifact_python.bat +++ b/tools/run_tests/build_artifact_python.bat @@ -28,33 +28,24 @@ @rem OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -set NUGET=C:\nuget\nuget.exe -%NUGET% restore vsprojects\grpc.sln || goto :error - - -@call vsprojects\build_vs2013.bat vsprojects\grpc.sln /t:grpc_dll /p:Configuration=Release /p:PlatformToolset=v120 /p:Platform=Win32 || goto :error -@call vsprojects\build_vs2013.bat vsprojects\grpc.sln /t:grpc_dll /p:Configuration=Release /p:PlatformToolset=v120 /p:Platform=x64 || goto :error - -mkdir src\python\grpcio\grpc\_cython\_windows - -@rem TODO(atash): maybe we could avoid the grpc_c.(32|64).python shim below if -@rem this used the right python build? -copy /Y vsprojects\Release\grpc_dll.dll src\python\grpcio\grpc\_cython\_windows\grpc_c.32.python || goto :error -copy /Y vsprojects\x64\Release\grpc_dll.dll src\python\grpcio\grpc\_cython\_windows\grpc_c.64.python || goto :error - set PATH=C:\%1;C:\%1\scripts;C:\msys64\mingw%2\bin;%PATH% pip install --upgrade six pip install --upgrade setuptools pip install -rrequirements.txt -set GRPC_PYTHON_USE_CUSTOM_BDIST=0 -set GRPC_PYTHON_BUILD_WITH_CYTHON=1 - @rem Because this is windows and *everything seems to hate Windows* we have to @rem set all of these flags ourselves because Python won't help us (see the @rem setup.py of the grpcio_tools project). set GRPC_PYTHON_CFLAGS=-fno-wrapv -frtti -std=c++11 + +@rem See https://sourceforge.net/p/mingw-w64/bugs/363/ +if %2 == 32 ( + set GRPC_PYTHON_CFLAGS=%GRPC_PYTHON_CFLAGS% -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s +) else ( + set GRPC_PYTHON_CFLAGS=%GRPC_PYTHON_CFLAGS% -D_ftime=_ftime64 -D_timeb=__timeb64 +) + @rem Further confusing things, MSYS2's mingw64 tries to dynamically link @rem libgcc, libstdc++, and winpthreads. 
We have to override this or our @rem extensions end up linking to MSYS2 DLLs, which the normal Python on @@ -66,23 +57,18 @@ python -c "from distutils.cygwinccompiler import get_msvcr; print(get_msvcr()[0] set /p PYTHON_MSVCR=<temp.txt set GRPC_PYTHON_LDFLAGS=-static-libgcc -static-libstdc++ -mcrtdll=%PYTHON_MSVCR% -static -lpthread - -@rem Build gRPC -if %2 == 32 ( - python setup.py build_ext -c mingw32 -) else ( - python setup.py build_ext -c mingw32 -DMS_WIN64 -) -python setup.py bdist_wheel +set GRPC_PYTHON_BUILD_WITH_CYTHON=1 -@rem Build gRPC Python tools +@rem Set up gRPC Python tools python tools\distrib\python\make_grpcio_tools.py -if %2 == 32 ( - python tools\distrib\python\grpcio_tools\setup.py build_ext -c mingw32 -) else ( - python tools\distrib\python\grpcio_tools\setup.py build_ext -c mingw32 -DMS_WIN64 -) + +@rem Build gRPC Python extensions +python setup.py build_ext -c mingw32 +python tools\distrib\python\grpcio_tools\setup.py build_ext -c mingw32 + +@rem Build gRPC Python distributions +python setup.py bdist_wheel python tools\distrib\python\grpcio_tools\setup.py bdist_wheel mkdir artifacts diff --git a/tools/run_tests/build_python.sh b/tools/run_tests/build_python.sh index 687b04e954..a3fa8200d5 100755 --- a/tools/run_tests/build_python.sh +++ b/tools/run_tests/build_python.sh @@ -33,25 +33,91 @@ set -ex # change to grpc repo root cd $(dirname $0)/../.. -# Arguments +########################## +# Portability operations # +########################## + +PLATFORM=`uname -s` + +function is_mingw() { + if [ "${PLATFORM/MINGW}" != "$PLATFORM" ]; then + echo true + else + exit 1 + fi +} + +function is_darwin() { + if [ "${PLATFORM/Darwin}" != "$PLATFORM" ]; then + echo true + else + exit 1 + fi +} + +function is_linux() { + if [ "${PLATFORM/Linux}" != "$PLATFORM" ]; then + echo true + else + exit 1 + fi +} + +# Associated virtual environment name for the given python command. +function venv() { + $1 -c "import sys; print('py{}{}'.format(*sys.version_info[:2]))" +} + +# Path to python executable within a virtual environment depending on the +# system. +function venv_relative_python() { + if [ $(is_mingw) ]; then + echo 'Scripts/python.exe' + else + echo 'bin/python' + fi +} + +# Distutils toolchain to use depending on the system. +function toolchain() { + if [ $(is_mingw) ]; then + echo 'mingw32' + else + echo 'unix' + fi +} + +# Command to invoke the linux command `realpath` or equivalent. +function script_realpath() { + # Find `realpath` + if [ -x "$(command -v realpath)" ]; then + realpath "$@" + elif [ -x "$(command -v grealpath)" ]; then + grealpath "$@" + else + exit 1 + fi +} + +#################### +# Script Arguments # +#################### + PYTHON=${1:-python2.7} -VENV=${2:-py27} -VENV_RELATIVE_PYTHON=${3:-bin/python} -TOOLCHAIN=${4:-unix} +VENV=${2:-$(venv $PYTHON)} +VENV_RELATIVE_PYTHON=${3:-$(venv_relative_python)} +TOOLCHAIN=${4:-$(toolchain)} ROOT=`pwd` -export CFLAGS="-I$ROOT/include -std=gnu99 -fno-wrapv" +export CFLAGS="-I$ROOT/include -std=gnu99 -fno-wrapv $CFLAGS" export GRPC_PYTHON_BUILD_WITH_CYTHON=1 # Default python on the host to fall back to when instantiating e.g. the # virtualenv. HOST_PYTHON=${HOST_PYTHON:-python} -# If ccache is available, use it... unless we're on Mac, then all hell breaks -# loose because Python does hacky things to support other hacky things done to -# hacky things on Mac OS X -PLATFORM=`uname -s` -if [ "${PLATFORM/Darwin}" = "$PLATFORM" ]; then +# If ccache is available on Linux, use it. 
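
Illustration (not part of this change): the venv() helper added to build_python.sh above derives the virtualenv name by asking the target interpreter for its own major/minor version instead of hard-coding py27/py34 as the old positional defaults did. A minimal Python equivalent of that shell one-liner, with a helper name of our own choosing:

import sys

def venv_name_for_current_interpreter():
    # 'py' + major + minor, e.g. 'py27' under CPython 2.7 or 'py34' under 3.4,
    # matching the directory names build_python.sh passes to virtualenv.
    return 'py{}{}'.format(*sys.version_info[:2])

if __name__ == '__main__':
    print(venv_name_for_current_interpreter())

Deriving the name from the interpreter keeps the default VENV consistent with whatever $PYTHON the caller supplied.
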
+if [ $(is_linux) ]; then # We're not on Darwin (Mac OS X) if [ -x "$(command -v ccache)" ]; then if [ -x "$(command -v gcc)" ]; then @@ -61,17 +127,24 @@ if [ "${PLATFORM/Darwin}" = "$PLATFORM" ]; then fi fi fi - -# Find `realpath` -if [ -x "$(command -v realpath)" ]; then - export REALPATH=realpath -elif [ -x "$(command -v grealpath)" ]; then - export REALPATH=grealpath -else - echo 'Couldn'"'"'t find `realpath` or `grealpath`' - exit 1 +# TODO(atash) consider conceptualizing MinGW as a first-class platform and move +# these flags into our `setup.py`s +if [ $(is_mingw) ]; then + # We're on MinGW, and our CFLAGS and LDFLAGS will be eaten by the void. Use + # our work-around environment variables instead. + PYTHON_MSVCR=`$PYTHON -c "from distutils.cygwinccompiler import get_msvcr; print(get_msvcr()[0])"` + export GRPC_PYTHON_LDFLAGS="-static-libgcc -static-libstdc++ -mcrtdll=$PYTHON_MSVCR -static -lpthread" + # See https://sourceforge.net/p/mingw-w64/bugs/363/ + export GRPC_PYTHON_CFLAGS="-D_ftime=_ftime64 -D_timeb=__timeb64" + # TODO(atash) set these flags for only grpcio-tools (they don't do any harm to + # grpcio, but they result in noisy warnings). + export GRPC_PYTHON_CFLAGS="-frtti -std=c++11 $GRPC_PYTHON_CFLAGS" fi +############################ +# Perform build operations # +############################ + # Instnatiate the virtualenv, preferring to do so from the relevant python # version. Even if these commands fail (e.g. on Windows due to name conflicts) # it's possible that the virtualenv is still usable and we trust the tester to @@ -80,7 +153,7 @@ fi ($PYTHON -m virtualenv $VENV || $HOST_PYTHON -m virtualenv -p $PYTHON $VENV || true) -VENV_PYTHON=`$REALPATH -s "$VENV/$VENV_RELATIVE_PYTHON"` +VENV_PYTHON=`script_realpath -s "$VENV/$VENV_RELATIVE_PYTHON"` # pip-installs the directory specified. Used because on MSYS the vanilla Windows # Python gets confused when parsing paths. diff --git a/src/ruby/spec/completion_queue_spec.rb b/tools/run_tests/build_python_msys2.sh index 886a7f263b..6e9d369018 100644 --- a/src/ruby/spec/completion_queue_spec.rb +++ b/tools/run_tests/build_python_msys2.sh @@ -1,4 +1,5 @@ -# Copyright 2015, Google Inc. +#!/bin/bash +# Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -27,16 +28,9 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
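
Illustration (not part of this change): on MinGW, both build_artifact_python.bat and build_python.sh above query distutils for the MSVC runtime the host Python was linked against, then force static linking of everything else so the extensions do not pick up MSYS2 DLLs. A rough Python sketch of that flag construction, assuming a Windows CPython where distutils.cygwinccompiler.get_msvcr() can report the runtime (the function name grpc_python_ldflags is ours, for illustration):

from distutils.cygwinccompiler import get_msvcr

def grpc_python_ldflags():
    # get_msvcr() returns the runtime library name(s) matching the running
    # interpreter (e.g. ['msvcr90'] for a VS2008-built Python 2.7); the build
    # points -mcrtdll at it and links libgcc/libstdc++/pthread statically.
    msvcr = get_msvcr()[0]
    return '-static-libgcc -static-libstdc++ -mcrtdll=%s -static -lpthread' % msvcr

if __name__ == '__main__':
    print(grpc_python_ldflags())

This mirrors the GRPC_PYTHON_LDFLAGS value the scripts export before invoking setup.py.
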
-require 'grpc' +set -ex -describe GRPC::Core::CompletionQueue do - before(:example) do - @cq = GRPC::Core::CompletionQueue.new - end - - describe '#new' do - it 'is constructed successufully' do - expect { GRPC::Core::CompletionQueue.new }.not_to raise_error - end - end -end +BUILD_PYTHON=`realpath "$(dirname $0)/build_python.sh"` +export MSYSTEM=$1 +shift 1 +bash --login $BUILD_PYTHON "$@" diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py index 4fe77487f9..3999537c40 100755 --- a/tools/run_tests/jobset.py +++ b/tools/run_tests/jobset.py @@ -47,6 +47,12 @@ measure_cpu_costs = False _DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count() _MAX_RESULT_SIZE = 8192 +def sanitized_environment(env): + sanitized = {} + for key, value in env.items(): + sanitized[str(key).encode()] = str(value).encode() + return sanitized + def platform_string(): if platform.system() == 'Windows': return 'windows' @@ -219,6 +225,7 @@ class Job(object): env = dict(os.environ) env.update(self._spec.environ) env.update(self._add_env) + env = sanitized_environment(env) self._start = time.time() cmdline = self._spec.cmdline if measure_cpu_costs: diff --git a/tools/run_tests/performance/run_worker_python.sh b/tools/run_tests/performance/run_worker_python.sh index 3b8ba6f4e4..06cf172d6f 100755 --- a/tools/run_tests/performance/run_worker_python.sh +++ b/tools/run_tests/performance/run_worker_python.sh @@ -32,4 +32,4 @@ set -ex cd $(dirname $0)/../../.. -PYTHONPATH=src/python/grpcio_tests:src/python/grpcio:src/python/gens py27/bin/python src/python/grpcio_tests/tests/qps/qps_worker.py $@ +PYTHONPATH=src/python/grpcio_tests:src/python/gens py27/bin/python src/python/grpcio_tests/tests/qps/qps_worker.py $@ diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py index 2d5130e1e8..4dfd01fc66 100644 --- a/tools/run_tests/performance/scenario_config.py +++ b/tools/run_tests/performance/scenario_config.py @@ -387,45 +387,44 @@ class PythonLanguage: return 500 def scenarios(self): - # TODO(issue #6522): Empty streaming requests does not work for python - #yield _ping_pong_scenario( - # 'python_generic_async_streaming_ping_pong', rpc_type='STREAMING', - # client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER', - # use_generic_payload=True, - # categories=[SMOKETEST]) + yield _ping_pong_scenario( + 'python_generic_sync_streaming_ping_pong', rpc_type='STREAMING', + client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER', + use_generic_payload=True, + categories=[SMOKETEST]) yield _ping_pong_scenario( 'python_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER') + client_type='SYNC_CLIENT', server_type='ASYNC_SERVER') yield _ping_pong_scenario( 'python_protobuf_async_unary_ping_pong', rpc_type='UNARY', - client_type='ASYNC_CLIENT', server_type='SYNC_SERVER') + client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER') yield _ping_pong_scenario( 'python_protobuf_sync_unary_ping_pong', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + client_type='SYNC_CLIENT', server_type='ASYNC_SERVER', categories=[SMOKETEST]) yield _ping_pong_scenario( 'python_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + client_type='SYNC_CLIENT', server_type='ASYNC_SERVER', unconstrained_client='sync') yield _ping_pong_scenario( 'python_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING', - client_type='SYNC_CLIENT', 
server_type='SYNC_SERVER', + client_type='SYNC_CLIENT', server_type='ASYNC_SERVER', unconstrained_client='sync') yield _ping_pong_scenario( 'python_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + client_type='SYNC_CLIENT', server_type='ASYNC_SERVER', server_language='c++', server_core_limit=1, async_server_threads=1, categories=[SMOKETEST]) yield _ping_pong_scenario( 'python_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + client_type='SYNC_CLIENT', server_type='ASYNC_SERVER', server_language='c++', server_core_limit=1, async_server_threads=1) def __str__(self): diff --git a/tools/run_tests/port_server.py b/tools/run_tests/port_server.py index e2be26d182..83f8e6cd35 100755 --- a/tools/run_tests/port_server.py +++ b/tools/run_tests/port_server.py @@ -42,7 +42,7 @@ import time # increment this number whenever making a change to ensure that # the changes are picked up by running CI servers # note that all changes must be backwards compatible -_MY_VERSION = 8 +_MY_VERSION = 9 if len(sys.argv) == 2 and sys.argv[1] == 'dump_version': @@ -110,6 +110,11 @@ keep_running = True class Handler(BaseHTTPServer.BaseHTTPRequestHandler): + + def setup(self): + # If the client is unreachable for 5 seconds, close the connection + self.timeout = 5 + BaseHTTPServer.BaseHTTPRequestHandler.setup(self) def do_GET(self): global keep_running diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index e1b7cf550f..f081887fc0 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -375,19 +375,15 @@ class PhpLanguage(object): class PythonConfig(collections.namedtuple('PythonConfig', [ - 'python', 'venv', 'venv_relative_python', 'toolchain',])): - - @property - def venv_python(self): - return os.path.abspath('{}/{}'.format(self.venv, self.venv_relative_python)) - + 'name', 'build', 'run'])): + """Tuple of commands (named s.t. 
'what it says on the tin' applies)""" class PythonLanguage(object): def configure(self, config, args): self.config = config self.args = args - self.pythons = self._get_pythons(self.args.compiler) + self.pythons = self._get_pythons(self.args) def test_specs(self): # load list of known test suites @@ -395,11 +391,11 @@ class PythonLanguage(object): tests_json = json.load(tests_json_file) environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS) return [self.config.job_spec( - ['tools/run_tests/run_python.sh', config.venv_python], + config.run, timeout_seconds=5*60, environ=dict(environment.items() + [('GRPC_PYTHON_TESTRUNNER_FILTER', suite_name)]), - shortname='%s.test.%s' % (config.venv, suite_name),) + shortname='%s.test.%s' % (config.name, suite_name),) for suite_name in tests_json for config in self.pythons] @@ -413,14 +409,7 @@ class PythonLanguage(object): return [] def build_steps(self): - return [ - [ - 'tools/run_tests/build_python.sh', - config.python, config.venv, - config.venv_relative_python, config.toolchain - ] - for config in self.pythons - ] + return [config.build for config in self.pythons] def post_tests_steps(self): return [] @@ -431,23 +420,50 @@ class PythonLanguage(object): def dockerfile_dir(self): return 'tools/dockerfile/test/python_jessie_%s' % _docker_arch_suffix(self.args.arch) - def _get_pythons(self, compiler): + def _get_pythons(self, args): + if args.arch == 'x86': + bits = '32' + else: + bits = '64' if os.name == 'nt': - venv_relative_python = 'Scripts/python.exe' - toolchain = 'mingw32' + shell = ['bash'] + builder = [os.path.abspath('tools/run_tests/build_python_msys2.sh')] + builder_prefix_arguments = ['MINGW{}'.format(bits)] + venv_relative_python = ['Scripts/python.exe'] + toolchain = ['mingw32'] + python_pattern_function = lambda major, minor, bits: ( + '/c/Python{major}{minor}/python.exe'.format(major=major, minor=minor, bits=bits) + if bits == '64' else + '/c/Python{major}{minor}_{bits}bits/python.exe'.format( + major=major, minor=minor, bits=bits)) else: - venv_relative_python = 'bin/python' - toolchain = 'unix' - python27_config = PythonConfig('python2.7', 'py27', venv_relative_python, toolchain) - python34_config = PythonConfig('python3.4', 'py34', venv_relative_python, toolchain) - if compiler == 'default': - return (python27_config, python34_config,) - elif compiler == 'python2.7': + shell = [] + builder = [os.path.abspath('tools/run_tests/build_python.sh')] + builder_prefix_arguments = [] + venv_relative_python = ['bin/python'] + toolchain = ['unix'] + # Bit-ness is handled by the test machine's environment + python_pattern_function = lambda major, minor, bits: 'python{major}.{minor}'.format(major=major, minor=minor) + runner = [os.path.abspath('tools/run_tests/run_python.sh')] + python_config_generator = lambda name, major, minor, bits: PythonConfig( + name, + shell + builder + builder_prefix_arguments + + [python_pattern_function(major=major, minor=minor, bits=bits)] + + [name] + venv_relative_python + toolchain, + shell + runner + [os.path.join(name, venv_relative_python[0])]) + python27_config = python_config_generator(name='py27', major='2', minor='7', bits=bits) + python34_config = python_config_generator(name='py34', major='3', minor='4', bits=bits) + if args.compiler == 'default': + if os.name == 'nt': + return (python27_config,) + else: + return (python27_config, python34_config,) + elif args.compiler == 'python2.7': return (python27_config,) - elif compiler == 'python3.4': + elif args.compiler == 'python3.4': return (python34_config,) else: - 
raise Exception('Compiler %s not supported.' % compiler) + raise Exception('Compiler %s not supported.' % args.compiler) def __str__(self): return 'python' @@ -633,10 +649,13 @@ class ObjCLanguage(object): _check_compiler(self.args.compiler, ['default']) def test_specs(self): - return [self.config.job_spec(['src/objective-c/tests/run_tests.sh'], None, - environ=_FORCE_ENVIRON_FOR_WRAPPERS), + return [self.config.job_spec(['src/objective-c/tests/run_tests.sh'], + timeout_seconds=None, + shortname='objc-tests', + environ=_FORCE_ENVIRON_FOR_WRAPPERS), self.config.job_spec(['src/objective-c/tests/build_example_test.sh'], - None, timeout_seconds=15*60, + timeout_seconds=15*60, + shortname='objc-examples-build', environ=_FORCE_ENVIRON_FOR_WRAPPERS)] def pre_build_steps(self): @@ -1074,11 +1093,19 @@ def _shut_down_legacy_server(legacy_server_port): 'http://localhost:%d/quitquitquit' % legacy_server_port).read() -def _start_port_server(port_server_port): - # Temporary patch to switch the port_server port - # see https://github.com/grpc/grpc/issues/7145 - _shut_down_legacy_server(32767) +def _shut_down_legacy_server(legacy_server_port): + try: + version = int(urllib2.urlopen( + 'http://localhost:%d/version_number' % legacy_server_port, + timeout=10).read()) + except: + pass + else: + urllib2.urlopen( + 'http://localhost:%d/quitquitquit' % legacy_server_port).read() + +def _start_port_server(port_server_port): # check if a compatible port server is running # if incompatible (version mismatch) ==> start a new one # if not running ==> start a new one diff --git a/tools/run_tests/sanity/check_submodules.sh b/tools/run_tests/sanity/check_submodules.sh index f2d7a1429e..b602d69564 100755 --- a/tools/run_tests/sanity/check_submodules.sh +++ b/tools/run_tests/sanity/check_submodules.sh @@ -45,7 +45,7 @@ cat << EOF | awk '{ print $1 }' | sort > $want_submodules 05b155ff59114735ec8cd089f669c4c3d8f59029 third_party/gflags (v2.1.0-45-g05b155f) c99458533a9b4c743ed51537e25989ea55944908 third_party/googletest (release-1.7.0) f8ac463766281625ad710900479130c7fcb4d63b third_party/nanopb (nanopb-0.3.4-29-gf8ac463) - d4d13a4349e4e59d67f311185ddcc1890d956d7a third_party/protobuf (v3.0.0-beta-3.2) + bdeb215cab2985195325fcd5e70c3fa751f46e0f third_party/protobuf (v3.0.0-beta-3.3) 50893291621658f355bc5b4d450a8d06a563053d third_party/zlib (v1.2.8) EOF diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json index 6d7cfdaf23..cdbc254f43 100644 --- a/tools/run_tests/sources_and_headers.json +++ b/tools/run_tests/sources_and_headers.json @@ -4458,6 +4458,7 @@ { "deps": [ "gpr", + "grpc", "grpc++_base", "grpc++_codegen_base", "grpc++_codegen_base_src", @@ -6514,6 +6515,7 @@ }, { "deps": [ + "grpc", "grpc++_codegen_base" ], "headers": [ diff --git a/vsprojects/grpc.sln b/vsprojects/grpc.sln index 6105f724c9..84720914b0 100644 --- a/vsprojects/grpc.sln +++ b/vsprojects/grpc.sln @@ -66,6 +66,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc++_unsecure", "vcxproj\ ProjectSection(ProjectDependencies) = postProject {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} {46CEDFFF-9692-456A-AA24-38B5D6BCF4C5} = {46CEDFFF-9692-456A-AA24-38B5D6BCF4C5} + {29D16885-7228-4C31-81ED-5F9187C7F2A9} = {29D16885-7228-4C31-81ED-5F9187C7F2A9} EndProjectSection EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc_create_jwt", "vcxproj\.\grpc_create_jwt\grpc_create_jwt.vcxproj", "{77971F8D-F583-3E77-0E3C-6C1FB6B1749C}" diff --git 
a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj index f37fefc65a..03be485b29 100644 --- a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj +++ b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj @@ -427,6 +427,9 @@ <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc_unsecure\grpc_unsecure.vcxproj"> <Project>{46CEDFFF-9692-456A-AA24-38B5D6BCF4C5}</Project> </ProjectReference> + <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc\grpc.vcxproj"> + <Project>{29D16885-7228-4C31-81ED-5F9187C7F2A9}</Project> + </ProjectReference> </ItemGroup> <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> <ImportGroup Label="ExtensionTargets"> |
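
Illustration (not part of this change): the port_server.py and run_tests.py edits above revolve around a small HTTP handshake, in which the test runner asks a running port server for /version_number and tells a mismatched one to /quitquitquit before starting a replacement (hence the _MY_VERSION bump to 9). A minimal client-side sketch of that check, using Python 2 urllib2 as the scripts do; the helper name port_server_needs_restart is ours:

import urllib2

_MY_VERSION = 9  # incremented here so running CI servers pick up the new handler

def port_server_needs_restart(port):
    # Returns True when no compatible port server is listening on `port`,
    # i.e. the caller should start a fresh one.
    try:
        version = int(urllib2.urlopen(
            'http://localhost:%d/version_number' % port, timeout=10).read())
    except Exception:
        return True  # nothing reachable: start a new server
    if version == _MY_VERSION:
        return False  # a compatible server is already running
    # Version mismatch: ask the old server to exit, then start a new one.
    urllib2.urlopen('http://localhost:%d/quitquitquit' % port).read()
    return True

The new Handler.setup() override above complements this from the server side by dropping connections to clients that are unreachable for five seconds.
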