159 files changed, 1685 insertions, 1659 deletions
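This change moves the backoff implementation out of gpr (src/core/lib/support/backoff.{c,h}) into a new src/core/lib/backoff/ module, renames the gpr_backoff_* API to grpc_backoff_*, relocates the test to test/core/backoff/backoff_test.c, and converts the backoff, timer, and deadline plumbing from gpr_timespec to the new grpc_millis clock (grpc_timer_init loses its `now` argument, and GPR_ATM_MIN is added alongside GPR_ATM_MAX). The sketch below is assembled from the call sites visible in this diff (subchannel.c, dns_resolver.c, grpclb.c) and shows how a retry path drives the relocated API after the change; schedule_retry and its parameters are illustrative placeholders, not code added by the patch.

    #include <inttypes.h>
    #include <grpc/support/log.h>
    #include "src/core/lib/backoff/backoff.h" /* grpc_backoff: new location */
    #include "src/core/lib/iomgr/exec_ctx.h"  /* grpc_exec_ctx_now, grpc_millis */
    #include "src/core/lib/iomgr/timer.h"     /* grpc_timer_init (no `now` arg) */

    static void schedule_retry(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff,
                               grpc_timer *retry_timer,
                               grpc_closure *retry_closure) {
      /* grpc_backoff_step now takes the exec_ctx and returns an absolute
         grpc_millis deadline instead of a gpr_timespec. */
      grpc_millis next_try = grpc_backoff_step(exec_ctx, backoff);
      grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
      if (timeout > 0) {
        gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
      } else {
        gpr_log(GPR_DEBUG, "retrying immediately");
      }
      /* The deadline is already on the exec_ctx millisecond clock, so
         grpc_timer_init no longer needs a separate `now` argument. */
      grpc_timer_init(exec_ctx, retry_timer, next_try, retry_closure);
    }

On a successful attempt the call sites reset the state with grpc_backoff_reset(), mirroring the former gpr_backoff_reset().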
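Timeouts that used to be carried as gpr_timespec {seconds, nanos} pairs (the service-config timeout in client_channel.c, the grpclb client_stats_report_interval) are now collapsed to a single grpc_millis value. The arithmetic is the same in both places and is restated below for illustration; to_millis is not a helper introduced by the patch, it simply mirrors the expression used in parse_timeout() and grpc_grpclb_duration_to_millis(), and int64_t stands in for grpc_millis (which the patch logs with PRIdPTR, i.e. an integer millisecond count).

    #include <grpc/support/time.h> /* GPR_MS_PER_SEC, GPR_NS_PER_MS */
    #include <stdint.h>

    static int64_t to_millis(int64_t seconds, int32_t nanos) {
      /* Fractional seconds are truncated to whole milliseconds. */
      return seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
    }

For example, a "1.5s" service-config timeout parses to seconds = 1, nanos = 500000000 and converts to 1500 ms; a grpc_grpclb_duration of {seconds = 2, nanos = 250000000} becomes 2250 ms.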
@@ -467,7 +467,6 @@ grpc_cc_library( "src/core/lib/support/arena.c", "src/core/lib/support/atm.c", "src/core/lib/support/avl.c", - "src/core/lib/support/backoff.c", "src/core/lib/support/cmdline.c", "src/core/lib/support/cpu_iphone.c", "src/core/lib/support/cpu_linux.c", @@ -514,8 +513,7 @@ grpc_cc_library( "src/core/lib/support/atomic.h", "src/core/lib/support/atomic_with_atm.h", "src/core/lib/support/atomic_with_std.h", - "src/core/lib/support/backoff.h", - "src/core/lib/support/block_annotate.h", + "src/core/lib/iomgr/block_annotate.h", "src/core/lib/support/env.h", "src/core/lib/support/memory.h", "src/core/lib/support/mpscq.h", @@ -692,6 +690,7 @@ grpc_cc_library( "src/core/lib/transport/timeout_encoding.c", "src/core/lib/transport/transport.c", "src/core/lib/transport/transport_op_string.c", + "src/core/lib/backoff/backoff.c", ], hdrs = [ "src/core/lib/channel/channel_args.h", @@ -809,6 +808,7 @@ grpc_cc_library( "src/core/lib/transport/timeout_encoding.h", "src/core/lib/transport/transport.h", "src/core/lib/transport/transport_impl.h", + "src/core/lib/backoff/backoff.h", ], external_deps = [ "zlib", diff --git a/CMakeLists.txt b/CMakeLists.txt index a5a7fad346..97faee09a9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -379,6 +379,7 @@ add_dependencies(buildtests_c algorithm_test) add_dependencies(buildtests_c alloc_test) add_dependencies(buildtests_c alpn_test) add_dependencies(buildtests_c arena_test) +add_dependencies(buildtests_c backoff_test) add_dependencies(buildtests_c bad_server_response_test) add_dependencies(buildtests_c bdp_estimator_test) add_dependencies(buildtests_c bin_decoder_test) @@ -428,7 +429,6 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_c goaway_server_test) endif() add_dependencies(buildtests_c gpr_avl_test) -add_dependencies(buildtests_c gpr_backoff_test) add_dependencies(buildtests_c gpr_cmdline_test) add_dependencies(buildtests_c gpr_cpu_test) add_dependencies(buildtests_c gpr_env_test) @@ -786,7 +786,6 @@ add_library(gpr src/core/lib/support/arena.c src/core/lib/support/atm.c src/core/lib/support/avl.c - src/core/lib/support/backoff.c src/core/lib/support/cmdline.c src/core/lib/support/cpu_iphone.c src/core/lib/support/cpu_linux.c @@ -955,6 +954,7 @@ endif (gRPC_BUILD_TESTS) add_library(grpc src/core/lib/surface/init.c + src/core/lib/backoff/backoff.c src/core/lib/channel/channel_args.c src/core/lib/channel/channel_stack.c src/core/lib/channel/channel_stack_builder.c @@ -1303,6 +1303,7 @@ endif() add_library(grpc_cronet src/core/lib/surface/init.c + src/core/lib/backoff/backoff.c src/core/lib/channel/channel_args.c src/core/lib/channel/channel_stack.c src/core/lib/channel/channel_stack_builder.c @@ -1619,6 +1620,7 @@ add_library(grpc_test_util test/core/util/port_server_client.c test/core/util/slice_splitter.c test/core/util/trickle_endpoint.c + src/core/lib/backoff/backoff.c src/core/lib/channel/channel_args.c src/core/lib/channel/channel_stack.c src/core/lib/channel/channel_stack_builder.c @@ -1879,6 +1881,7 @@ add_library(grpc_test_util_unsecure test/core/util/port_server_client.c test/core/util/slice_splitter.c test/core/util/trickle_endpoint.c + src/core/lib/backoff/backoff.c src/core/lib/channel/channel_args.c src/core/lib/channel/channel_stack.c src/core/lib/channel/channel_stack_builder.c @@ -2125,6 +2128,7 @@ endif (gRPC_BUILD_TESTS) add_library(grpc_unsecure src/core/lib/surface/init.c src/core/lib/surface/init_unsecure.c + src/core/lib/backoff/backoff.c 
src/core/lib/channel/channel_args.c src/core/lib/channel/channel_stack.c src/core/lib/channel/channel_stack_builder.c @@ -2877,6 +2881,7 @@ add_library(grpc++_cronet src/core/ext/transport/chttp2/transport/stream_map.c src/core/ext/transport/chttp2/transport/varint.c src/core/ext/transport/chttp2/transport/writing.c + src/core/lib/backoff/backoff.c src/core/lib/channel/channel_args.c src/core/lib/channel/channel_stack.c src/core/lib/channel/channel_stack_builder.c @@ -5168,6 +5173,35 @@ target_link_libraries(arena_test endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) +add_executable(backoff_test + test/core/backoff/backoff_test.c +) + + +target_include_directories(backoff_test + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include + PRIVATE ${BORINGSSL_ROOT_DIR}/include + PRIVATE ${PROTOBUF_ROOT_DIR}/src + PRIVATE ${BENCHMARK_ROOT_DIR}/include + PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib + PRIVATE ${CARES_INCLUDE_DIR} + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include +) + +target_link_libraries(backoff_test + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc_test_util + grpc + gpr_test_util + gpr +) + +endif (gRPC_BUILD_TESTS) +if (gRPC_BUILD_TESTS) + add_executable(bad_server_response_test test/core/end2end/bad_server_response_test.c ) @@ -6268,33 +6302,6 @@ target_link_libraries(gpr_avl_test endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) -add_executable(gpr_backoff_test - test/core/support/backoff_test.c -) - - -target_include_directories(gpr_backoff_test - PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} - PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include - PRIVATE ${BORINGSSL_ROOT_DIR}/include - PRIVATE ${PROTOBUF_ROOT_DIR}/src - PRIVATE ${BENCHMARK_ROOT_DIR}/include - PRIVATE ${ZLIB_ROOT_DIR} - PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib - PRIVATE ${CARES_INCLUDE_DIR} - PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares - PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include -) - -target_link_libraries(gpr_backoff_test - ${_gRPC_ALLTARGETS_LIBRARIES} - gpr_test_util - gpr -) - -endif (gRPC_BUILD_TESTS) -if (gRPC_BUILD_TESTS) - add_executable(gpr_cmdline_test test/core/support/cmdline_test.c ) @@ -950,6 +950,7 @@ alloc_test: $(BINDIR)/$(CONFIG)/alloc_test alpn_test: $(BINDIR)/$(CONFIG)/alpn_test api_fuzzer: $(BINDIR)/$(CONFIG)/api_fuzzer arena_test: $(BINDIR)/$(CONFIG)/arena_test +backoff_test: $(BINDIR)/$(CONFIG)/backoff_test bad_server_response_test: $(BINDIR)/$(CONFIG)/bad_server_response_test bdp_estimator_test: $(BINDIR)/$(CONFIG)/bdp_estimator_test bin_decoder_test: $(BINDIR)/$(CONFIG)/bin_decoder_test @@ -988,7 +989,6 @@ gen_legal_metadata_characters: $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters gen_percent_encoding_tables: $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables goaway_server_test: $(BINDIR)/$(CONFIG)/goaway_server_test gpr_avl_test: $(BINDIR)/$(CONFIG)/gpr_avl_test -gpr_backoff_test: $(BINDIR)/$(CONFIG)/gpr_backoff_test gpr_cmdline_test: $(BINDIR)/$(CONFIG)/gpr_cmdline_test gpr_cpu_test: $(BINDIR)/$(CONFIG)/gpr_cpu_test gpr_env_test: $(BINDIR)/$(CONFIG)/gpr_env_test @@ -1350,6 +1350,7 @@ buildtests_c: privatelibs_c \ $(BINDIR)/$(CONFIG)/alloc_test \ $(BINDIR)/$(CONFIG)/alpn_test \ $(BINDIR)/$(CONFIG)/arena_test \ + $(BINDIR)/$(CONFIG)/backoff_test \ $(BINDIR)/$(CONFIG)/bad_server_response_test \ $(BINDIR)/$(CONFIG)/bdp_estimator_test \ $(BINDIR)/$(CONFIG)/bin_decoder_test \ @@ -1383,7 +1384,6 @@ buildtests_c: privatelibs_c \ 
$(BINDIR)/$(CONFIG)/fling_test \ $(BINDIR)/$(CONFIG)/goaway_server_test \ $(BINDIR)/$(CONFIG)/gpr_avl_test \ - $(BINDIR)/$(CONFIG)/gpr_backoff_test \ $(BINDIR)/$(CONFIG)/gpr_cmdline_test \ $(BINDIR)/$(CONFIG)/gpr_cpu_test \ $(BINDIR)/$(CONFIG)/gpr_env_test \ @@ -1761,6 +1761,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/alpn_test || ( echo test alpn_test failed ; exit 1 ) $(E) "[RUN] Testing arena_test" $(Q) $(BINDIR)/$(CONFIG)/arena_test || ( echo test arena_test failed ; exit 1 ) + $(E) "[RUN] Testing backoff_test" + $(Q) $(BINDIR)/$(CONFIG)/backoff_test || ( echo test backoff_test failed ; exit 1 ) $(E) "[RUN] Testing bad_server_response_test" $(Q) $(BINDIR)/$(CONFIG)/bad_server_response_test || ( echo test bad_server_response_test failed ; exit 1 ) $(E) "[RUN] Testing bdp_estimator_test" @@ -1823,8 +1825,6 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/goaway_server_test || ( echo test goaway_server_test failed ; exit 1 ) $(E) "[RUN] Testing gpr_avl_test" $(Q) $(BINDIR)/$(CONFIG)/gpr_avl_test || ( echo test gpr_avl_test failed ; exit 1 ) - $(E) "[RUN] Testing gpr_backoff_test" - $(Q) $(BINDIR)/$(CONFIG)/gpr_backoff_test || ( echo test gpr_backoff_test failed ; exit 1 ) $(E) "[RUN] Testing gpr_cmdline_test" $(Q) $(BINDIR)/$(CONFIG)/gpr_cmdline_test || ( echo test gpr_cmdline_test failed ; exit 1 ) $(E) "[RUN] Testing gpr_cpu_test" @@ -2800,7 +2800,6 @@ LIBGPR_SRC = \ src/core/lib/support/arena.c \ src/core/lib/support/atm.c \ src/core/lib/support/avl.c \ - src/core/lib/support/backoff.c \ src/core/lib/support/cmdline.c \ src/core/lib/support/cpu_iphone.c \ src/core/lib/support/cpu_linux.c \ @@ -2946,6 +2945,7 @@ endif LIBGRPC_SRC = \ src/core/lib/surface/init.c \ + src/core/lib/backoff/backoff.c \ src/core/lib/channel/channel_args.c \ src/core/lib/channel/channel_stack.c \ src/core/lib/channel/channel_stack_builder.c \ @@ -3294,6 +3294,7 @@ endif LIBGRPC_CRONET_SRC = \ src/core/lib/surface/init.c \ + src/core/lib/backoff/backoff.c \ src/core/lib/channel/channel_args.c \ src/core/lib/channel/channel_stack.c \ src/core/lib/channel/channel_stack_builder.c \ @@ -3609,6 +3610,7 @@ LIBGRPC_TEST_UTIL_SRC = \ test/core/util/port_server_client.c \ test/core/util/slice_splitter.c \ test/core/util/trickle_endpoint.c \ + src/core/lib/backoff/backoff.c \ src/core/lib/channel/channel_args.c \ src/core/lib/channel/channel_stack.c \ src/core/lib/channel/channel_stack_builder.c \ @@ -3860,6 +3862,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \ test/core/util/port_server_client.c \ test/core/util/slice_splitter.c \ test/core/util/trickle_endpoint.c \ + src/core/lib/backoff/backoff.c \ src/core/lib/channel/channel_args.c \ src/core/lib/channel/channel_stack.c \ src/core/lib/channel/channel_stack_builder.c \ @@ -4084,6 +4087,7 @@ endif LIBGRPC_UNSECURE_SRC = \ src/core/lib/surface/init.c \ src/core/lib/surface/init_unsecure.c \ + src/core/lib/backoff/backoff.c \ src/core/lib/channel/channel_args.c \ src/core/lib/channel/channel_stack.c \ src/core/lib/channel/channel_stack_builder.c \ @@ -4819,6 +4823,7 @@ LIBGRPC++_CRONET_SRC = \ src/core/ext/transport/chttp2/transport/stream_map.c \ src/core/ext/transport/chttp2/transport/varint.c \ src/core/ext/transport/chttp2/transport/writing.c \ + src/core/lib/backoff/backoff.c \ src/core/lib/channel/channel_args.c \ src/core/lib/channel/channel_stack.c \ src/core/lib/channel/channel_stack_builder.c \ @@ -8865,6 +8870,38 @@ endif endif +BACKOFF_TEST_SRC = \ + test/core/backoff/backoff_test.c \ + +BACKOFF_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, 
$(basename $(BACKOFF_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. + +$(BINDIR)/$(CONFIG)/backoff_test: openssl_dep_error + +else + + + +$(BINDIR)/$(CONFIG)/backoff_test: $(BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/backoff_test + +endif + +$(OBJDIR)/$(CONFIG)/test/core/backoff/backoff_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + +deps_backoff_test: $(BACKOFF_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(BACKOFF_TEST_OBJS:.o=.dep) +endif +endif + + BAD_SERVER_RESPONSE_TEST_SRC = \ test/core/end2end/bad_server_response_test.c \ @@ -10084,38 +10121,6 @@ endif endif -GPR_BACKOFF_TEST_SRC = \ - test/core/support/backoff_test.c \ - -GPR_BACKOFF_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GPR_BACKOFF_TEST_SRC)))) -ifeq ($(NO_SECURE),true) - -# You can't build secure targets if you don't have OpenSSL. - -$(BINDIR)/$(CONFIG)/gpr_backoff_test: openssl_dep_error - -else - - - -$(BINDIR)/$(CONFIG)/gpr_backoff_test: $(GPR_BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a - $(E) "[LD] Linking $@" - $(Q) mkdir -p `dirname $@` - $(Q) $(LD) $(LDFLAGS) $(GPR_BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gpr_backoff_test - -endif - -$(OBJDIR)/$(CONFIG)/test/core/support/backoff_test.o: $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a - -deps_gpr_backoff_test: $(GPR_BACKOFF_TEST_OBJS:.o=.dep) - -ifneq ($(NO_SECURE),true) -ifneq ($(NO_DEPS),true) --include $(GPR_BACKOFF_TEST_OBJS:.o=.dep) -endif -endif - - GPR_CMDLINE_TEST_SRC = \ test/core/support/cmdline_test.c \ diff --git a/binding.gyp b/binding.gyp index 06dc731935..f718acd09e 100644 --- a/binding.gyp +++ b/binding.gyp @@ -599,7 +599,6 @@ 'src/core/lib/support/arena.c', 'src/core/lib/support/atm.c', 'src/core/lib/support/avl.c', - 'src/core/lib/support/backoff.c', 'src/core/lib/support/cmdline.c', 'src/core/lib/support/cpu_iphone.c', 'src/core/lib/support/cpu_linux.c', @@ -657,6 +656,7 @@ ], 'sources': [ 'src/core/lib/surface/init.c', + 'src/core/lib/backoff/backoff.c', 'src/core/lib/channel/channel_args.c', 'src/core/lib/channel/channel_stack.c', 'src/core/lib/channel/channel_stack_builder.c', diff --git a/build.yaml b/build.yaml index d15cf0339c..4e1ad3b66f 100644 --- a/build.yaml +++ b/build.yaml @@ -66,7 +66,6 @@ filegroups: - src/core/lib/support/arena.c - src/core/lib/support/atm.c - src/core/lib/support/avl.c - - src/core/lib/support/backoff.c - src/core/lib/support/cmdline.c - src/core/lib/support/cpu_iphone.c - src/core/lib/support/cpu_linux.c @@ -143,7 +142,6 @@ filegroups: - src/core/lib/support/atomic.h - src/core/lib/support/atomic_with_atm.h - src/core/lib/support/atomic_with_std.h - - src/core/lib/support/backoff.h - src/core/lib/support/block_annotate.h - src/core/lib/support/env.h - src/core/lib/support/memory.h @@ -185,6 +183,7 @@ filegroups: - grpc++_codegen_base - name: 
grpc_base src: + - src/core/lib/backoff/backoff.c - src/core/lib/channel/channel_args.c - src/core/lib/channel/channel_stack.c - src/core/lib/channel/channel_stack_builder.c @@ -335,6 +334,7 @@ filegroups: - include/grpc/status.h - include/grpc/support/workaround_list.h headers: + - src/core/lib/backoff/backoff.h - src/core/lib/channel/channel_args.h - src/core/lib/channel/channel_stack.h - src/core/lib/channel/channel_stack_builder.h @@ -1768,6 +1768,16 @@ targets: deps: - gpr_test_util - gpr +- name: backoff_test + build: test + language: c + src: + - test/core/backoff/backoff_test.c + deps: + - grpc_test_util + - grpc + - gpr_test_util + - gpr - name: bad_server_response_test build: test language: c @@ -2196,14 +2206,6 @@ targets: deps: - gpr_test_util - gpr -- name: gpr_backoff_test - build: test - language: c - src: - - test/core/support/backoff_test.c - deps: - - gpr_test_util - - gpr - name: gpr_cmdline_test build: test language: c @@ -45,7 +45,6 @@ if test "$PHP_GRPC" != "no"; then src/core/lib/support/arena.c \ src/core/lib/support/atm.c \ src/core/lib/support/avl.c \ - src/core/lib/support/backoff.c \ src/core/lib/support/cmdline.c \ src/core/lib/support/cpu_iphone.c \ src/core/lib/support/cpu_linux.c \ @@ -86,6 +85,7 @@ if test "$PHP_GRPC" != "no"; then src/core/lib/support/tmpfile_windows.c \ src/core/lib/support/wrap_memcpy.c \ src/core/lib/surface/init.c \ + src/core/lib/backoff/backoff.c \ src/core/lib/channel/channel_args.c \ src/core/lib/channel/channel_stack.c \ src/core/lib/channel/channel_stack_builder.c \ @@ -684,6 +684,7 @@ if test "$PHP_GRPC" != "no"; then PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/transport/chttp2/server/secure) PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/transport/chttp2/transport) PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/transport/inproc) + PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/backoff) PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/channel) PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/compression) PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/debug) diff --git a/config.w32 b/config.w32 index 92faad7a8f..63dc6317bc 100644 --- a/config.w32 +++ b/config.w32 @@ -22,7 +22,6 @@ if (PHP_GRPC != "no") { "src\\core\\lib\\support\\arena.c " + "src\\core\\lib\\support\\atm.c " + "src\\core\\lib\\support\\avl.c " + - "src\\core\\lib\\support\\backoff.c " + "src\\core\\lib\\support\\cmdline.c " + "src\\core\\lib\\support\\cpu_iphone.c " + "src\\core\\lib\\support\\cpu_linux.c " + @@ -63,6 +62,7 @@ if (PHP_GRPC != "no") { "src\\core\\lib\\support\\tmpfile_windows.c " + "src\\core\\lib\\support\\wrap_memcpy.c " + "src\\core\\lib\\surface\\init.c " + + "src\\core\\lib\\backoff\\backoff.c " + "src\\core\\lib\\channel\\channel_args.c " + "src\\core\\lib\\channel\\channel_stack.c " + "src\\core\\lib\\channel\\channel_stack_builder.c " + @@ -697,6 +697,7 @@ if (PHP_GRPC != "no") { FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\transport\\chttp2\\transport"); FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\transport\\inproc"); FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib"); + FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\backoff"); FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\channel"); FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\compression"); FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\debug"); diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 2f1f415866..76cc83df73 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -188,7 +188,6 @@ Pod::Spec.new do |s| 
'src/core/lib/support/atomic.h', 'src/core/lib/support/atomic_with_atm.h', 'src/core/lib/support/atomic_with_std.h', - 'src/core/lib/support/backoff.h', 'src/core/lib/support/block_annotate.h', 'src/core/lib/support/env.h', 'src/core/lib/support/memory.h', @@ -206,7 +205,6 @@ Pod::Spec.new do |s| 'src/core/lib/support/arena.c', 'src/core/lib/support/atm.c', 'src/core/lib/support/avl.c', - 'src/core/lib/support/backoff.c', 'src/core/lib/support/cmdline.c', 'src/core/lib/support/cpu_iphone.c', 'src/core/lib/support/cpu_linux.c', @@ -318,6 +316,7 @@ Pod::Spec.new do |s| 'src/core/ext/filters/deadline/deadline_filter.h', 'src/core/ext/transport/chttp2/client/chttp2_connector.h', 'src/core/ext/transport/inproc/inproc_transport.h', + 'src/core/lib/backoff/backoff.h', 'src/core/lib/channel/channel_args.h', 'src/core/lib/channel/channel_stack.h', 'src/core/lib/channel/channel_stack_builder.h', @@ -468,6 +467,7 @@ Pod::Spec.new do |s| 'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h', 'src/core/ext/filters/workarounds/workaround_utils.h', 'src/core/lib/surface/init.c', + 'src/core/lib/backoff/backoff.c', 'src/core/lib/channel/channel_args.c', 'src/core/lib/channel/channel_stack.c', 'src/core/lib/channel/channel_stack_builder.c', @@ -729,7 +729,6 @@ Pod::Spec.new do |s| 'src/core/lib/support/atomic.h', 'src/core/lib/support/atomic_with_atm.h', 'src/core/lib/support/atomic_with_std.h', - 'src/core/lib/support/backoff.h', 'src/core/lib/support/block_annotate.h', 'src/core/lib/support/env.h', 'src/core/lib/support/memory.h', @@ -813,6 +812,7 @@ Pod::Spec.new do |s| 'src/core/ext/filters/deadline/deadline_filter.h', 'src/core/ext/transport/chttp2/client/chttp2_connector.h', 'src/core/ext/transport/inproc/inproc_transport.h', + 'src/core/lib/backoff/backoff.h', 'src/core/lib/channel/channel_args.h', 'src/core/lib/channel/channel_stack.h', 'src/core/lib/channel/channel_stack_builder.h', diff --git a/grpc.gemspec b/grpc.gemspec index 96323aed10..bca52e300d 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -88,7 +88,6 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/support/atomic.h ) s.files += %w( src/core/lib/support/atomic_with_atm.h ) s.files += %w( src/core/lib/support/atomic_with_std.h ) - s.files += %w( src/core/lib/support/backoff.h ) s.files += %w( src/core/lib/support/block_annotate.h ) s.files += %w( src/core/lib/support/env.h ) s.files += %w( src/core/lib/support/memory.h ) @@ -106,7 +105,6 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/support/arena.c ) s.files += %w( src/core/lib/support/atm.c ) s.files += %w( src/core/lib/support/avl.c ) - s.files += %w( src/core/lib/support/backoff.c ) s.files += %w( src/core/lib/support/cmdline.c ) s.files += %w( src/core/lib/support/cpu_iphone.c ) s.files += %w( src/core/lib/support/cpu_linux.c ) @@ -251,6 +249,7 @@ Gem::Specification.new do |s| s.files += %w( src/core/ext/filters/deadline/deadline_filter.h ) s.files += %w( src/core/ext/transport/chttp2/client/chttp2_connector.h ) s.files += %w( src/core/ext/transport/inproc/inproc_transport.h ) + s.files += %w( src/core/lib/backoff/backoff.h ) s.files += %w( src/core/lib/channel/channel_args.h ) s.files += %w( src/core/lib/channel/channel_stack.h ) s.files += %w( src/core/lib/channel/channel_stack_builder.h ) @@ -405,6 +404,7 @@ Gem::Specification.new do |s| s.files += %w( src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h ) s.files += %w( src/core/ext/filters/workarounds/workaround_utils.h ) s.files += %w( src/core/lib/surface/init.c ) 
+ s.files += %w( src/core/lib/backoff/backoff.c ) s.files += %w( src/core/lib/channel/channel_args.c ) s.files += %w( src/core/lib/channel/channel_stack.c ) s.files += %w( src/core/lib/channel/channel_stack_builder.c ) @@ -163,7 +163,6 @@ 'src/core/lib/support/arena.c', 'src/core/lib/support/atm.c', 'src/core/lib/support/avl.c', - 'src/core/lib/support/backoff.c', 'src/core/lib/support/cmdline.c', 'src/core/lib/support/cpu_iphone.c', 'src/core/lib/support/cpu_linux.c', @@ -223,6 +222,7 @@ ], 'sources': [ 'src/core/lib/surface/init.c', + 'src/core/lib/backoff/backoff.c', 'src/core/lib/channel/channel_args.c', 'src/core/lib/channel/channel_stack.c', 'src/core/lib/channel/channel_stack_builder.c', @@ -522,6 +522,7 @@ 'test/core/util/port_server_client.c', 'test/core/util/slice_splitter.c', 'test/core/util/trickle_endpoint.c', + 'src/core/lib/backoff/backoff.c', 'src/core/lib/channel/channel_args.c', 'src/core/lib/channel/channel_stack.c', 'src/core/lib/channel/channel_stack_builder.c', @@ -726,6 +727,7 @@ 'test/core/util/port_server_client.c', 'test/core/util/slice_splitter.c', 'test/core/util/trickle_endpoint.c', + 'src/core/lib/backoff/backoff.c', 'src/core/lib/channel/channel_args.c', 'src/core/lib/channel/channel_stack.c', 'src/core/lib/channel/channel_stack_builder.c', @@ -915,6 +917,7 @@ 'sources': [ 'src/core/lib/surface/init.c', 'src/core/lib/surface/init_unsecure.c', + 'src/core/lib/backoff/backoff.c', 'src/core/lib/channel/channel_args.c', 'src/core/lib/channel/channel_stack.c', 'src/core/lib/channel/channel_stack_builder.c', diff --git a/include/grpc/impl/codegen/atm_gcc_atomic.h b/include/grpc/impl/codegen/atm_gcc_atomic.h index 1793ec22b8..76ce863914 100644 --- a/include/grpc/impl/codegen/atm_gcc_atomic.h +++ b/include/grpc/impl/codegen/atm_gcc_atomic.h @@ -25,6 +25,7 @@ typedef intptr_t gpr_atm; #define GPR_ATM_MAX INTPTR_MAX +#define GPR_ATM_MIN INTPTR_MIN #ifdef GPR_LOW_LEVEL_COUNTERS extern gpr_atm gpr_counter_atm_cas; diff --git a/include/grpc/impl/codegen/atm_gcc_sync.h b/include/grpc/impl/codegen/atm_gcc_sync.h index 27ae0f63d5..a9e4da3a0f 100644 --- a/include/grpc/impl/codegen/atm_gcc_sync.h +++ b/include/grpc/impl/codegen/atm_gcc_sync.h @@ -25,6 +25,7 @@ typedef intptr_t gpr_atm; #define GPR_ATM_MAX INTPTR_MAX +#define GPR_ATM_MIN INTPTR_MIN #define GPR_ATM_COMPILE_BARRIER_() __asm__ __volatile__("" : : : "memory") diff --git a/include/grpc/impl/codegen/atm_windows.h b/include/grpc/impl/codegen/atm_windows.h index dfcaa4cc37..b868d79aef 100644 --- a/include/grpc/impl/codegen/atm_windows.h +++ b/include/grpc/impl/codegen/atm_windows.h @@ -24,6 +24,7 @@ typedef intptr_t gpr_atm; #define GPR_ATM_MAX INTPTR_MAX +#define GPR_ATM_MIN INTPTR_MIN #define gpr_atm_full_barrier MemoryBarrier diff --git a/package.xml b/package.xml index 0c201afdae..b2899e924a 100644 --- a/package.xml +++ b/package.xml @@ -100,7 +100,6 @@ <file baseinstalldir="/" name="src/core/lib/support/atomic.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/support/atomic_with_atm.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/support/atomic_with_std.h" role="src" /> - <file baseinstalldir="/" name="src/core/lib/support/backoff.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/support/block_annotate.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/support/env.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/support/memory.h" role="src" /> @@ -118,7 +117,6 @@ <file baseinstalldir="/" name="src/core/lib/support/arena.c" role="src" /> <file 
baseinstalldir="/" name="src/core/lib/support/atm.c" role="src" /> <file baseinstalldir="/" name="src/core/lib/support/avl.c" role="src" /> - <file baseinstalldir="/" name="src/core/lib/support/backoff.c" role="src" /> <file baseinstalldir="/" name="src/core/lib/support/cmdline.c" role="src" /> <file baseinstalldir="/" name="src/core/lib/support/cpu_iphone.c" role="src" /> <file baseinstalldir="/" name="src/core/lib/support/cpu_linux.c" role="src" /> @@ -263,6 +261,7 @@ <file baseinstalldir="/" name="src/core/ext/filters/deadline/deadline_filter.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/transport/chttp2/client/chttp2_connector.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/transport/inproc/inproc_transport.h" role="src" /> + <file baseinstalldir="/" name="src/core/lib/backoff/backoff.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/channel/channel_args.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/channel/channel_stack.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/channel/channel_stack_builder.h" role="src" /> @@ -417,6 +416,7 @@ <file baseinstalldir="/" name="src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/workarounds/workaround_utils.h" role="src" /> <file baseinstalldir="/" name="src/core/lib/surface/init.c" role="src" /> + <file baseinstalldir="/" name="src/core/lib/backoff/backoff.c" role="src" /> <file baseinstalldir="/" name="src/core/lib/channel/channel_args.c" role="src" /> <file baseinstalldir="/" name="src/core/lib/channel/channel_stack.c" role="src" /> <file baseinstalldir="/" name="src/core/lib/channel/channel_stack_builder.c" role="src" /> diff --git a/src/core/ext/filters/client_channel/channel_connectivity.c b/src/core/ext/filters/client_channel/channel_connectivity.c index 3844b98021..f79d3bdb56 100644 --- a/src/core/ext/filters/client_channel/channel_connectivity.c +++ b/src/core/ext/filters/client_channel/channel_connectivity.c @@ -186,8 +186,8 @@ static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg, watcher_timer_init_arg *wa = (watcher_timer_init_arg *)arg; grpc_timer_init(exec_ctx, &wa->w->alarm, - gpr_convert_clock_type(wa->deadline, GPR_CLOCK_MONOTONIC), - &wa->w->on_timeout, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timespec_to_millis_round_up(wa->deadline), + &wa->w->on_timeout); gpr_free(wa); } diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c index 016199b1f4..051419f2e4 100644 --- a/src/core/ext/filters/client_channel/client_channel.c +++ b/src/core/ext/filters/client_channel/client_channel.c @@ -68,7 +68,7 @@ typedef enum { typedef struct { gpr_refcount refs; - gpr_timespec timeout; + grpc_millis timeout; wait_for_ready_value wait_for_ready; } method_parameters; @@ -98,17 +98,18 @@ static bool parse_wait_for_ready(grpc_json *field, return true; } -static bool parse_timeout(grpc_json *field, gpr_timespec *timeout) { +static bool parse_timeout(grpc_json *field, grpc_millis *timeout) { if (field->type != GRPC_JSON_STRING) return false; size_t len = strlen(field->value); if (field->value[len - 1] != 's') return false; char *buf = gpr_strdup(field->value); buf[len - 1] = '\0'; // Remove trailing 's'. 
char *decimal_point = strchr(buf, '.'); + int nanos = 0; if (decimal_point != NULL) { *decimal_point = '\0'; - timeout->tv_nsec = gpr_parse_nonnegative_int(decimal_point + 1); - if (timeout->tv_nsec == -1) { + nanos = gpr_parse_nonnegative_int(decimal_point + 1); + if (nanos == -1) { gpr_free(buf); return false; } @@ -127,24 +128,25 @@ static bool parse_timeout(grpc_json *field, gpr_timespec *timeout) { gpr_free(buf); return false; } - timeout->tv_nsec *= multiplier; + nanos *= multiplier; } - timeout->tv_sec = gpr_parse_nonnegative_int(buf); + int seconds = gpr_parse_nonnegative_int(buf); gpr_free(buf); - if (timeout->tv_sec == -1) return false; + if (seconds == -1) return false; + *timeout = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS; return true; } static void *method_parameters_create_from_json(const grpc_json *json) { wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET; - gpr_timespec timeout = {0, 0, GPR_TIMESPAN}; + grpc_millis timeout = 0; for (grpc_json *field = json->child; field != NULL; field = field->next) { if (field->key == NULL) continue; if (strcmp(field->key, "waitForReady") == 0) { if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL; // Duplicate. if (!parse_wait_for_ready(field, &wait_for_ready)) return NULL; } else if (strcmp(field->key, "timeout") == 0) { - if (timeout.tv_sec > 0 || timeout.tv_nsec > 0) return NULL; // Duplicate. + if (timeout > 0) return NULL; // Duplicate. if (!parse_timeout(field, &timeout)) return NULL; } } @@ -823,7 +825,7 @@ typedef struct client_channel_call_data { grpc_slice path; // Request path. gpr_timespec call_start_time; - gpr_timespec deadline; + grpc_millis deadline; gpr_arena *arena; grpc_call_stack *owning_call; grpc_call_combiner *call_combiner; @@ -976,11 +978,11 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx, // If the deadline from the service config is shorter than the one // from the client API, reset the deadline timer. if (chand->deadline_checking_enabled && - gpr_time_cmp(calld->method_params->timeout, - gpr_time_0(GPR_TIMESPAN)) != 0) { - const gpr_timespec per_method_deadline = - gpr_time_add(calld->call_start_time, calld->method_params->timeout); - if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) { + calld->method_params->timeout != 0) { + const grpc_millis per_method_deadline = + grpc_timespec_to_millis_round_up(calld->call_start_time) + + calld->method_params->timeout; + if (per_method_deadline < calld->deadline) { calld->deadline = per_method_deadline; grpc_deadline_state_reset(exec_ctx, elem, calld->deadline); } @@ -1418,7 +1420,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, // Initialize data members. 
calld->path = grpc_slice_ref_internal(args->path); calld->call_start_time = args->start_time; - calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC); + calld->deadline = args->deadline; calld->arena = args->arena; calld->owning_call = args->call_stack; calld->call_combiner = args->call_combiner; diff --git a/src/core/ext/filters/client_channel/connector.h b/src/core/ext/filters/client_channel/connector.h index 7f3d4a1cc0..bf79f261b0 100644 --- a/src/core/ext/filters/client_channel/connector.h +++ b/src/core/ext/filters/client_channel/connector.h @@ -34,7 +34,7 @@ typedef struct { /** set of pollsets interested in this connection */ grpc_pollset_set *interested_parties; /** deadline for connection */ - gpr_timespec deadline; + grpc_millis deadline; /** channel arguments (to be passed to transport) */ const grpc_channel_args *channel_args; } grpc_connect_in_args; diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c index 85ef7894ea..0423615105 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c @@ -102,6 +102,7 @@ #include "src/core/ext/filters/client_channel/parse_address.h" #include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" #include "src/core/ext/filters/client_channel/subchannel_index.h" +#include "src/core/lib/backoff/backoff.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/iomgr/combiner.h" @@ -111,7 +112,6 @@ #include "src/core/lib/slice/slice_hash_table.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/backoff.h" #include "src/core/lib/support/string.h" #include "src/core/lib/surface/call.h" #include "src/core/lib/surface/channel.h" @@ -385,7 +385,7 @@ typedef struct glb_lb_policy { grpc_slice lb_call_status_details; /** LB call retry backoff state */ - gpr_backoff lb_call_backoff_state; + grpc_backoff lb_call_backoff_state; /** LB call retry timer */ grpc_timer lb_call_retry_timer; @@ -397,7 +397,7 @@ typedef struct glb_lb_policy { * recreated whenever lb_call is replaced. */ grpc_grpclb_client_stats *client_stats; /* Interval and timer for next client load report. 
*/ - gpr_timespec client_stats_report_interval; + grpc_millis client_stats_report_interval; grpc_timer client_load_report_timer; bool client_load_report_timer_pending; bool last_client_load_report_counters_were_zero; @@ -1072,7 +1072,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx, static void start_picking_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy) { glb_policy->started_picking = true; - gpr_backoff_reset(&glb_policy->lb_call_backoff_state); + grpc_backoff_reset(&glb_policy->lb_call_backoff_state); query_for_backends_locked(exec_ctx, glb_policy); } @@ -1178,15 +1178,14 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg, static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy) { - const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - const gpr_timespec next_client_load_report_time = - gpr_time_add(now, glb_policy->client_stats_report_interval); + const grpc_millis next_client_load_report_time = + grpc_exec_ctx_now(exec_ctx) + glb_policy->client_stats_report_interval; GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure, send_client_load_report_locked, glb_policy, grpc_combiner_scheduler(glb_policy->base.combiner)); grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer, next_client_load_report_time, - &glb_policy->client_load_report_closure, now); + &glb_policy->client_load_report_closure); } static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg, @@ -1286,12 +1285,10 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx, * glb_policy->base.interested_parties, which is comprised of the polling * entities from \a client_channel. */ grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name); - gpr_timespec deadline = + grpc_millis deadline = glb_policy->lb_call_timeout_ms == 0 - ? gpr_inf_future(GPR_CLOCK_MONOTONIC) - : gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_millis(glb_policy->lb_call_timeout_ms, - GPR_TIMESPAN)); + ? GRPC_MILLIS_INF_FUTURE + : grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms; glb_policy->lb_call = grpc_channel_create_pollset_set_call( exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS, glb_policy->base.interested_parties, @@ -1325,12 +1322,12 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx, lb_on_response_received_locked, glb_policy, grpc_combiner_scheduler(glb_policy->base.combiner)); - gpr_backoff_init(&glb_policy->lb_call_backoff_state, - GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS, - GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER, - GRPC_GRPCLB_RECONNECT_JITTER, - GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000, - GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000); + grpc_backoff_init(&glb_policy->lb_call_backoff_state, + GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS, + GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER, + GRPC_GRPCLB_RECONNECT_JITTER, + GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000, + GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000); glb_policy->initial_request_sent = false; glb_policy->seen_initial_response = false; @@ -1457,7 +1454,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, memset(ops, 0, sizeof(ops)); grpc_op *op = ops; if (glb_policy->lb_response_payload != NULL) { - gpr_backoff_reset(&glb_policy->lb_call_backoff_state); + grpc_backoff_reset(&glb_policy->lb_call_backoff_state); /* Received data from the LB server. Look inside * glb_policy->lb_response_payload, for a serverlist. 
*/ grpc_byte_buffer_reader bbr; @@ -1471,16 +1468,14 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, (response = grpc_grpclb_initial_response_parse(response_slice)) != NULL) { if (response->has_client_stats_report_interval) { - glb_policy->client_stats_report_interval = - gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN), - grpc_grpclb_duration_to_timespec( - &response->client_stats_report_interval)); + glb_policy->client_stats_report_interval = GPR_MAX( + GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis( + &response->client_stats_report_interval)); if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { gpr_log(GPR_INFO, "received initial LB response message; " - "client load reporting interval = %" PRId64 ".%09d sec", - glb_policy->client_stats_report_interval.tv_sec, - glb_policy->client_stats_report_interval.tv_nsec); + "client load reporting interval = %" PRIdPTR " milliseconds", + glb_policy->client_stats_report_interval); } /* take a weak ref (won't prevent calling of \a glb_shutdown() if the * strong ref count goes to zero) to be unref'd in @@ -1611,17 +1606,16 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, glb_policy->updating_lb_call = false; } else if (!glb_policy->shutting_down) { /* if we aren't shutting down, restart the LB client call after some time */ - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec next_try = - gpr_backoff_step(&glb_policy->lb_call_backoff_state, now); + grpc_millis next_try = + grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state); if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...", (void *)glb_policy); - gpr_timespec timeout = gpr_time_sub(next_try, now); - if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) { + grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx); + if (timeout > 0) { gpr_log(GPR_DEBUG, - "... retry_timer_active in %" PRId64 ".%09d seconds.", - timeout.tv_sec, timeout.tv_nsec); + "... retry_timer_active in %" PRIdPTR " milliseconds.", + timeout); } else { gpr_log(GPR_DEBUG, "... retry_timer_active immediately."); } @@ -1632,7 +1626,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, grpc_combiner_scheduler(glb_policy->base.combiner)); glb_policy->retry_timer_active = true; grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try, - &glb_policy->lb_on_call_retry, now); + &glb_policy->lb_on_call_retry); } GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "lb_on_server_status_received_locked"); diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c index 8ef6dfc6f4..9c6fb94bf1 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c @@ -299,13 +299,10 @@ int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs, return 0; } -gpr_timespec grpc_grpclb_duration_to_timespec( - grpc_grpclb_duration *duration_pb) { - gpr_timespec duration; - duration.tv_sec = duration_pb->has_seconds ? duration_pb->seconds : 0; - duration.tv_nsec = duration_pb->has_nanos ? duration_pb->nanos : 0; - duration.clock_type = GPR_TIMESPAN; - return duration; +grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb) { + return (duration_pb->has_seconds ? duration_pb->seconds : 0) * + GPR_MS_PER_SEC + + (duration_pb->has_nanos ? 
duration_pb->nanos : 0) / GPR_NS_PER_MS; } void grpc_grpclb_initial_response_destroy( diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h index c4a98492c9..56b9c096d0 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h @@ -81,8 +81,7 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist); int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs, const grpc_grpclb_duration *rhs); -gpr_timespec grpc_grpclb_duration_to_timespec( - grpc_grpclb_duration *duration_pb); +grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb); /** Destroy \a initial_response */ void grpc_grpclb_initial_response_destroy( diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c index 9bb229ad95..34132e6b51 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c @@ -32,13 +32,13 @@ #include "src/core/ext/filters/client_channel/lb_policy_registry.h" #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" #include "src/core/ext/filters/client_channel/resolver_registry.h" +#include "src/core/lib/backoff/backoff.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/gethostname.h" #include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/json/json.h" -#include "src/core/lib/support/backoff.h" #include "src/core/lib/support/env.h" #include "src/core/lib/support/string.h" #include "src/core/lib/transport/service_config.h" @@ -89,7 +89,7 @@ typedef struct { bool have_retry_timer; grpc_timer retry_timer; /** retry backoff state */ - gpr_backoff backoff_state; + grpc_backoff backoff_state; /** currently resolving addresses */ grpc_lb_addresses *lb_addresses; @@ -137,7 +137,7 @@ static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) { ares_dns_resolver *r = (ares_dns_resolver *)resolver; if (!r->resolving) { - gpr_backoff_reset(&r->backoff_state); + grpc_backoff_reset(&r->backoff_state); dns_ares_start_resolving_locked(exec_ctx, r); } } @@ -271,22 +271,20 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg, } else { const char *msg = grpc_error_string(error); gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg); - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now); - gpr_timespec timeout = gpr_time_sub(next_try, now); + grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state); + grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx); gpr_log(GPR_INFO, "dns resolution failed (will retry): %s", grpc_error_string(error)); GPR_ASSERT(!r->have_retry_timer); r->have_retry_timer = true; GRPC_RESOLVER_REF(&r->base, "retry-timer"); - if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) { - gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec, - timeout.tv_nsec); + if (timeout > 0) { + gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout); } else { gpr_log(GPR_DEBUG, "retrying immediately"); } 
grpc_timer_init(exec_ctx, &r->retry_timer, next_try, - &r->dns_ares_on_retry_timer_locked, now); + &r->dns_ares_on_retry_timer_locked); } if (r->resolved_result != NULL) { grpc_channel_args_destroy(exec_ctx, r->resolved_result); @@ -307,7 +305,7 @@ static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx, r->next_completion = on_complete; r->target_result = target_result; if (r->resolved_version == 0 && !r->resolving) { - gpr_backoff_reset(&r->backoff_state); + grpc_backoff_reset(&r->backoff_state); dns_ares_start_resolving_locked(exec_ctx, r); } else { dns_ares_maybe_finish_next_locked(exec_ctx, r); @@ -381,11 +379,11 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx, grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties, args->pollset_set); } - gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS, - GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER, - GRPC_DNS_RECONNECT_JITTER, - GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000, - GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000); + grpc_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS, + GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER, + GRPC_DNS_RECONNECT_JITTER, + GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000, + GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000); GRPC_CLOSURE_INIT(&r->dns_ares_on_retry_timer_locked, dns_ares_on_retry_timer_locked, r, grpc_combiner_scheduler(r->base.combiner)); diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c index 5ea75f0554..e4bffbe871 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c +++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c @@ -24,11 +24,11 @@ #include "src/core/ext/filters/client_channel/lb_policy_registry.h" #include "src/core/ext/filters/client_channel/resolver_registry.h" +#include "src/core/lib/backoff/backoff.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/support/backoff.h" #include "src/core/lib/support/env.h" #include "src/core/lib/support/string.h" @@ -67,7 +67,7 @@ typedef struct { grpc_timer retry_timer; grpc_closure on_retry; /** retry backoff state */ - gpr_backoff backoff_state; + grpc_backoff backoff_state; /** currently resolving addresses */ grpc_resolved_addresses *addresses; @@ -110,7 +110,7 @@ static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) { dns_resolver *r = (dns_resolver *)resolver; if (!r->resolving) { - gpr_backoff_reset(&r->backoff_state); + grpc_backoff_reset(&r->backoff_state); dns_start_resolving_locked(exec_ctx, r); } } @@ -123,7 +123,7 @@ static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver, r->next_completion = on_complete; r->target_result = target_result; if (r->resolved_version == 0 && !r->resolving) { - gpr_backoff_reset(&r->backoff_state); + grpc_backoff_reset(&r->backoff_state); dns_start_resolving_locked(exec_ctx, r); } else { dns_maybe_finish_next_locked(exec_ctx, r); @@ -150,6 +150,9 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_channel_args *result = NULL; GPR_ASSERT(r->resolving); r->resolving = false; + GRPC_ERROR_REF(error); + error = grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS, + grpc_slice_from_copied_string(r->name_to_resolve)); if (r->addresses != NULL) { grpc_lb_addresses 
*addresses = grpc_lb_addresses_create( r->addresses->naddrs, NULL /* user_data_vtable */); @@ -164,23 +167,21 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_resolved_addresses_destroy(r->addresses); grpc_lb_addresses_destroy(exec_ctx, addresses); } else { - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now); - gpr_timespec timeout = gpr_time_sub(next_try, now); + grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state); + grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx); gpr_log(GPR_INFO, "dns resolution failed (will retry): %s", grpc_error_string(error)); GPR_ASSERT(!r->have_retry_timer); r->have_retry_timer = true; GRPC_RESOLVER_REF(&r->base, "retry-timer"); - if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) { - gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec, - timeout.tv_nsec); + if (timeout > 0) { + gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout); } else { gpr_log(GPR_DEBUG, "retrying immediately"); } GRPC_CLOSURE_INIT(&r->on_retry, dns_on_retry_timer_locked, r, grpc_combiner_scheduler(r->base.combiner)); - grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry, now); + grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry); } if (r->resolved_result != NULL) { grpc_channel_args_destroy(exec_ctx, r->resolved_result); @@ -188,6 +189,7 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg, r->resolved_result = result; r->resolved_version++; dns_maybe_finish_next_locked(exec_ctx, r); + GRPC_ERROR_UNREF(error); GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving"); } @@ -251,11 +253,11 @@ static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx, grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties, args->pollset_set); } - gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS, - GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER, - GRPC_DNS_RECONNECT_JITTER, - GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000, - GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000); + grpc_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS, + GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER, + GRPC_DNS_RECONNECT_JITTER, + GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000, + GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000); return &r->base; } diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.c index 40a51c72d6..bcc41de98c 100644 --- a/src/core/ext/filters/client_channel/subchannel.c +++ b/src/core/ext/filters/client_channel/subchannel.c @@ -30,6 +30,7 @@ #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h" #include "src/core/ext/filters/client_channel/subchannel_index.h" #include "src/core/ext/filters/client_channel/uri_parser.h" +#include "src/core/lib/backoff/backoff.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/connected_channel.h" #include "src/core/lib/debug/stats.h" @@ -37,7 +38,6 @@ #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/support/backoff.h" #include "src/core/lib/surface/channel.h" #include "src/core/lib/surface/channel_init.h" #include "src/core/lib/transport/connectivity_state.h" @@ -117,9 +117,9 @@ struct grpc_subchannel { external_state_watcher root_external_state_watcher; /** next connect attempt time */ - gpr_timespec next_attempt; + 
grpc_millis next_attempt; /** backoff state */ - gpr_backoff backoff_state; + grpc_backoff backoff_state; /** do we have an active alarm? */ bool have_alarm; /** have we started the backoff loop */ @@ -367,7 +367,7 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx, } } } - gpr_backoff_init( + grpc_backoff_init( &c->backoff_state, initial_backoff_ms, fixed_reconnect_backoff ? 1.0 : GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER, @@ -431,8 +431,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { } if (error == GRPC_ERROR_NONE) { gpr_log(GPR_INFO, "Failed to connect to channel, retrying"); - c->next_attempt = - gpr_backoff_step(&c->backoff_state, gpr_now(GPR_CLOCK_MONOTONIC)); + c->next_attempt = grpc_backoff_step(exec_ctx, &c->backoff_state); continue_connect_locked(exec_ctx, c); gpr_mu_unlock(&c->mu); } else { @@ -467,24 +466,22 @@ static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx, c->connecting = true; GRPC_SUBCHANNEL_WEAK_REF(c, "connecting"); - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); if (!c->backoff_begun) { c->backoff_begun = true; - c->next_attempt = gpr_backoff_begin(&c->backoff_state, now); + c->next_attempt = grpc_backoff_begin(exec_ctx, &c->backoff_state); continue_connect_locked(exec_ctx, c); } else { GPR_ASSERT(!c->have_alarm); c->have_alarm = true; - gpr_timespec time_til_next = gpr_time_sub(c->next_attempt, now); - if (gpr_time_cmp(time_til_next, gpr_time_0(time_til_next.clock_type)) <= - 0) { + const grpc_millis time_til_next = + c->next_attempt - grpc_exec_ctx_now(exec_ctx); + if (time_til_next <= 0) { gpr_log(GPR_INFO, "Retry immediately"); } else { - gpr_log(GPR_INFO, "Retry in %" PRId64 ".%09d seconds", - time_til_next.tv_sec, time_til_next.tv_nsec); + gpr_log(GPR_INFO, "Retry in %" PRIdPTR " milliseconds", time_til_next); } GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm, now); + grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm); } } diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h index 51d712f6a7..73702d56a6 100644 --- a/src/core/ext/filters/client_channel/subchannel.h +++ b/src/core/ext/filters/client_channel/subchannel.h @@ -103,7 +103,7 @@ typedef struct { grpc_polling_entity *pollent; grpc_slice path; gpr_timespec start_time; - gpr_timespec deadline; + grpc_millis deadline; gpr_arena *arena; grpc_call_context_element *context; grpc_call_combiner *call_combiner; diff --git a/src/core/ext/filters/deadline/deadline_filter.c b/src/core/ext/filters/deadline/deadline_filter.c index 1aed488077..2b2eac2ac5 100644 --- a/src/core/ext/filters/deadline/deadline_filter.c +++ b/src/core/ext/filters/deadline/deadline_filter.c @@ -86,9 +86,8 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg, // synchronized. 
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, - gpr_timespec deadline) { - deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); - if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) { + grpc_millis deadline) { + if (deadline == GRPC_MILLIS_INF_FUTURE) { return; } grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; @@ -114,8 +113,7 @@ static void start_timer_if_needed(grpc_exec_ctx* exec_ctx, } GPR_ASSERT(closure != NULL); GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer"); - grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure, - gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure); } // Cancels the deadline timer. @@ -155,7 +153,7 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state, struct start_timer_after_init_state { bool in_call_combiner; grpc_call_element* elem; - gpr_timespec deadline; + grpc_millis deadline; grpc_closure closure; }; static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg, @@ -182,14 +180,13 @@ static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg, void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, grpc_call_stack* call_stack, grpc_call_combiner* call_combiner, - gpr_timespec deadline) { + grpc_millis deadline) { grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; deadline_state->call_stack = call_stack; deadline_state->call_combiner = call_combiner; // Deadline will always be infinite on servers, so the timer will only be // set on clients with a finite deadline. - deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); - if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) { + if (deadline != GRPC_MILLIS_INF_FUTURE) { // When the deadline passes, we indicate the failure by sending down // an op with cancel_error set. However, we can't send down any ops // until after the call stack is fully initialized. If we start the @@ -214,7 +211,7 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx, } void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, - gpr_timespec new_deadline) { + grpc_millis new_deadline) { grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; cancel_timer_if_needed(exec_ctx, deadline_state); start_timer_if_needed(exec_ctx, elem, new_deadline); diff --git a/src/core/ext/filters/deadline/deadline_filter.h b/src/core/ext/filters/deadline/deadline_filter.h index 3eb102ad28..8d835d0382 100644 --- a/src/core/ext/filters/deadline/deadline_filter.h +++ b/src/core/ext/filters/deadline/deadline_filter.h @@ -52,7 +52,8 @@ typedef struct grpc_deadline_state { void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, grpc_call_stack* call_stack, grpc_call_combiner* call_combiner, - gpr_timespec deadline); + grpc_millis deadline); + void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx, grpc_call_element* elem); @@ -66,7 +67,7 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx, // // Note: Must be called while holding the call combiner. void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, - gpr_timespec new_deadline); + grpc_millis new_deadline); // To be called from the client-side filter's start_transport_stream_op_batch() // method. 
Ensures that the deadline timer is cancelled when the call diff --git a/src/core/ext/filters/max_age/max_age_filter.c b/src/core/ext/filters/max_age/max_age_filter.c index 0ac803ed41..b7032f288d 100644 --- a/src/core/ext/filters/max_age/max_age_filter.c +++ b/src/core/ext/filters/max_age/max_age_filter.c @@ -56,11 +56,11 @@ typedef struct channel_data { max_connection_idle */ grpc_timer max_idle_timer; /* Allowed max time a channel may have no outstanding rpcs */ - gpr_timespec max_connection_idle; + grpc_millis max_connection_idle; /* Allowed max time a channel may exist */ - gpr_timespec max_connection_age; + grpc_millis max_connection_age; /* Allowed grace period after the channel reaches its max age */ - gpr_timespec max_connection_age_grace; + grpc_millis max_connection_age_grace; /* Closure to run when the channel's idle duration reaches max_connection_idle and should be closed gracefully */ grpc_closure close_max_idle_channel; @@ -99,10 +99,9 @@ static void increase_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) { static void decrease_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) { if (gpr_atm_full_fetch_add(&chand->call_count, -1) == 1) { GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_idle_timer"); - grpc_timer_init( - exec_ctx, &chand->max_idle_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), chand->max_connection_idle), - &chand->close_max_idle_channel, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(exec_ctx, &chand->max_idle_timer, + grpc_exec_ctx_now(exec_ctx) + chand->max_connection_idle, + &chand->close_max_idle_channel); } } @@ -123,10 +122,9 @@ static void start_max_age_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg, gpr_mu_lock(&chand->max_age_timer_mu); chand->max_age_timer_pending = true; GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_timer"); - grpc_timer_init( - exec_ctx, &chand->max_age_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), chand->max_connection_age), - &chand->close_max_age_channel, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(exec_ctx, &chand->max_age_timer, + grpc_exec_ctx_now(exec_ctx) + chand->max_connection_age, + &chand->close_max_age_channel); gpr_mu_unlock(&chand->max_age_timer_mu); grpc_transport_op* op = grpc_make_transport_op(NULL); op->on_connectivity_state_change = &chand->channel_connectivity_changed, @@ -144,11 +142,12 @@ static void start_max_age_grace_timer_after_goaway_op(grpc_exec_ctx* exec_ctx, gpr_mu_lock(&chand->max_age_timer_mu); chand->max_age_grace_timer_pending = true; GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_grace_timer"); - grpc_timer_init(exec_ctx, &chand->max_age_grace_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - chand->max_connection_age_grace), - &chand->force_close_max_age_channel, - gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init( + exec_ctx, &chand->max_age_grace_timer, + chand->max_connection_age_grace == GRPC_MILLIS_INF_FUTURE + ? GRPC_MILLIS_INF_FUTURE + : grpc_exec_ctx_now(exec_ctx) + chand->max_connection_age_grace, + &chand->force_close_max_age_channel); gpr_mu_unlock(&chand->max_age_timer_mu); GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack, "max_age start_max_age_grace_timer_after_goaway_op"); @@ -249,7 +248,8 @@ static void channel_connectivity_changed(grpc_exec_ctx* exec_ctx, void* arg, connection storms. Note that the MAX_CONNECTION_AGE option without jitter would not create connection storms by itself, but if there happened to be a connection storm it could cause it to repeat at a fixed period. 
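Aside: read in isolation, the jitter-and-convert helper rewritten in the next hunk lines amounts to the sketch below. The 0.1 jitter constant is an assumed stand-in for the filter's MAX_CONNECTION_AGE_JITTER, and the int64 sentinel stands in for GRPC_MILLIS_INF_FUTURE; only the shape of the computation is taken from the hunk.

#include <stdint.h>
#include <stdlib.h>

typedef int64_t millis_t;
#define MILLIS_INF_FUTURE INT64_MAX
#define CONNECTION_AGE_JITTER 0.1 /* assumed value, for illustration only */

static millis_t jitter_and_convert_to_millis(int value_ms) {
  /* multiplier drawn uniformly from [1 - JITTER, 1 + JITTER] */
  const double multiplier = rand() * CONNECTION_AGE_JITTER * 2.0 / RAND_MAX +
                            1.0 - CONNECTION_AGE_JITTER;
  const double result = multiplier * value_ms;
  /* subtracting 0.5 keeps the comparison in floating point (the same trick
     as the original INT_MAX - 0.5), so result is not truncated first */
  return result > ((double)MILLIS_INF_FUTURE) - 0.5 ? MILLIS_INF_FUTURE
                                                    : (millis_t)result;
}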
*/ -static int add_random_max_connection_age_jitter(int value) { +static grpc_millis +add_random_max_connection_age_jitter_and_convert_to_grpc_millis(int value) { /* generate a random number between 1 - MAX_CONNECTION_AGE_JITTER and 1 + MAX_CONNECTION_AGE_JITTER */ double multiplier = rand() * MAX_CONNECTION_AGE_JITTER * 2.0 / RAND_MAX + @@ -257,7 +257,9 @@ static int add_random_max_connection_age_jitter(int value) { double result = multiplier * value; /* INT_MAX - 0.5 converts the value to float, so that result will not be cast to int implicitly before the comparison. */ - return result > INT_MAX - 0.5 ? INT_MAX : (int)result; + return result > ((double)GRPC_MILLIS_INF_FUTURE) - 0.5 + ? GRPC_MILLIS_INF_FUTURE + : (grpc_millis)result; } /* Constructor for call_data. */ @@ -287,30 +289,23 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, chand->max_age_grace_timer_pending = false; chand->channel_stack = args->channel_stack; chand->max_connection_age = - DEFAULT_MAX_CONNECTION_AGE_MS == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(add_random_max_connection_age_jitter( - DEFAULT_MAX_CONNECTION_AGE_MS), - GPR_TIMESPAN); + add_random_max_connection_age_jitter_and_convert_to_grpc_millis( + DEFAULT_MAX_CONNECTION_AGE_MS); chand->max_connection_age_grace = DEFAULT_MAX_CONNECTION_AGE_GRACE_MS == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(DEFAULT_MAX_CONNECTION_AGE_GRACE_MS, - GPR_TIMESPAN); - chand->max_connection_idle = - DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(DEFAULT_MAX_CONNECTION_IDLE_MS, GPR_TIMESPAN); + ? GRPC_MILLIS_INF_FUTURE + : DEFAULT_MAX_CONNECTION_AGE_GRACE_MS; + chand->max_connection_idle = DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX + ? GRPC_MILLIS_INF_FUTURE + : DEFAULT_MAX_CONNECTION_IDLE_MS; for (size_t i = 0; i < args->channel_args->num_args; ++i) { if (0 == strcmp(args->channel_args->args[i].key, GRPC_ARG_MAX_CONNECTION_AGE_MS)) { const int value = grpc_channel_arg_get_integer( &args->channel_args->args[i], MAX_CONNECTION_AGE_INTEGER_OPTIONS); chand->max_connection_age = - value == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis( - add_random_max_connection_age_jitter(value), GPR_TIMESPAN); + add_random_max_connection_age_jitter_and_convert_to_grpc_millis( + value); } else if (0 == strcmp(args->channel_args->args[i].key, GRPC_ARG_MAX_CONNECTION_AGE_GRACE_MS)) { const int value = grpc_channel_arg_get_integer( @@ -318,15 +313,13 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, (grpc_integer_options){DEFAULT_MAX_CONNECTION_AGE_GRACE_MS, 0, INT_MAX}); chand->max_connection_age_grace = - value == INT_MAX ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(value, GPR_TIMESPAN); + value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value; } else if (0 == strcmp(args->channel_args->args[i].key, GRPC_ARG_MAX_CONNECTION_IDLE_MS)) { const int value = grpc_channel_arg_get_integer( &args->channel_args->args[i], MAX_CONNECTION_IDLE_INTEGER_OPTIONS); chand->max_connection_idle = - value == INT_MAX ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(value, GPR_TIMESPAN); + value == INT_MAX ? 
GRPC_MILLIS_INF_FUTURE : value; } } GRPC_CLOSURE_INIT(&chand->close_max_idle_channel, close_max_idle_channel, @@ -349,8 +342,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, channel_connectivity_changed, chand, grpc_schedule_on_exec_ctx); - if (gpr_time_cmp(chand->max_connection_age, gpr_inf_future(GPR_TIMESPAN)) != - 0) { + if (chand->max_connection_age != GRPC_MILLIS_INF_FUTURE) { /* When the channel reaches its max age, we send down an op with goaway_error set. However, we can't send down any ops until after the channel stack is fully initialized. If we start the timer here, we have @@ -367,8 +359,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, /* Initialize the number of calls as 1, so that the max_idle_timer will not start until start_max_idle_timer_after_init is invoked. */ gpr_atm_rel_store(&chand->call_count, 1); - if (gpr_time_cmp(chand->max_connection_idle, gpr_inf_future(GPR_TIMESPAN)) != - 0) { + if (chand->max_connection_idle != GRPC_MILLIS_INF_FUTURE) { GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age start_max_idle_timer_after_init"); GRPC_CLOSURE_SCHED(exec_ctx, &chand->start_max_idle_timer_after_init, diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c index 60244e163b..46943757c4 100644 --- a/src/core/ext/transport/chttp2/server/chttp2_server.c +++ b/src/core/ext/transport/chttp2/server/chttp2_server.c @@ -133,8 +133,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, connection_state->handshake_mgr); // TODO(roth): We should really get this timeout value from channel // args instead of hard-coding it. - const gpr_timespec deadline = gpr_time_add( - gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(120, GPR_TIMESPAN)); + const grpc_millis deadline = + grpc_exec_ctx_now(exec_ctx) + 120 * GPR_MS_PER_SEC; grpc_handshake_manager_do_handshake(exec_ctx, connection_state->handshake_mgr, tcp, state->args, deadline, acceptor, on_handshake_done, connection_state); diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 1a6e08fd0b..4f38119948 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -156,11 +156,9 @@ static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp, static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_error *error); -static void send_ping_locked( - grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_chttp2_ping_type ping_type, grpc_closure *on_initiate, - grpc_closure *on_complete, - grpc_chttp2_initiate_write_reason initiate_write_reason); +static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, + grpc_closure *on_initiate, + grpc_closure *on_complete); static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp, grpc_error *error); @@ -276,6 +274,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, t->is_client = is_client; t->flow_control.remote_window = DEFAULT_WINDOW; t->flow_control.announced_window = DEFAULT_WINDOW; + t->flow_control.target_initial_window_size = DEFAULT_WINDOW; t->flow_control.t = t; t->deframe_state = is_client ? 
GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0; t->is_first_frame = true; @@ -314,16 +313,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_combiner_scheduler(t->combiner)); grpc_bdp_estimator_init(&t->flow_control.bdp_estimator, t->peer_string); - t->flow_control.last_pid_update = gpr_now(GPR_CLOCK_MONOTONIC); - grpc_pid_controller_init( - &t->flow_control.pid_controller, - (grpc_pid_controller_args){.gain_p = 4, - .gain_i = 8, - .gain_d = 0, - .initial_control_value = log2(DEFAULT_WINDOW), - .min_control_value = -1, - .max_control_value = 25, - .integral_range = 10}); grpc_chttp2_goaway_parser_init(&t->goaway_parser); grpc_chttp2_hpack_parser_init(exec_ctx, &t->hpack_parser); @@ -362,43 +351,33 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0); } - queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, - DEFAULT_WINDOW); queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE, DEFAULT_MAX_HEADER_LIST_SIZE); queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1); t->ping_policy.max_pings_without_data = g_default_max_pings_without_data; - t->ping_policy.min_sent_ping_interval_without_data = gpr_time_from_millis( - g_default_min_sent_ping_interval_without_data_ms, GPR_TIMESPAN); + t->ping_policy.min_sent_ping_interval_without_data = + g_default_min_sent_ping_interval_without_data_ms; t->ping_policy.max_ping_strikes = g_default_max_ping_strikes; - t->ping_policy.min_recv_ping_interval_without_data = gpr_time_from_millis( - g_default_min_recv_ping_interval_without_data_ms, GPR_TIMESPAN); + t->ping_policy.min_recv_ping_interval_without_data = + g_default_min_recv_ping_interval_without_data_ms; /* Keepalive setting */ if (t->is_client) { - t->keepalive_time = - g_default_client_keepalive_time_ms == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(g_default_client_keepalive_time_ms, - GPR_TIMESPAN); - t->keepalive_timeout = - g_default_client_keepalive_timeout_ms == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(g_default_client_keepalive_timeout_ms, - GPR_TIMESPAN); + t->keepalive_time = g_default_client_keepalive_time_ms == INT_MAX + ? GRPC_MILLIS_INF_FUTURE + : g_default_client_keepalive_time_ms; + t->keepalive_timeout = g_default_client_keepalive_timeout_ms == INT_MAX + ? GRPC_MILLIS_INF_FUTURE + : g_default_client_keepalive_timeout_ms; } else { - t->keepalive_time = - g_default_server_keepalive_time_ms == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(g_default_server_keepalive_time_ms, - GPR_TIMESPAN); - t->keepalive_timeout = - g_default_server_keepalive_timeout_ms == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(g_default_server_keepalive_timeout_ms, - GPR_TIMESPAN); + t->keepalive_time = g_default_server_keepalive_time_ms == INT_MAX + ? GRPC_MILLIS_INF_FUTURE + : g_default_server_keepalive_time_ms; + t->keepalive_timeout = g_default_server_keepalive_timeout_ms == INT_MAX + ? 
GRPC_MILLIS_INF_FUTURE + : g_default_server_keepalive_timeout_ms; } t->keepalive_permit_without_calls = g_default_keepalive_permit_without_calls; @@ -445,25 +424,21 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, channel_args->args[i].key, GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) { t->ping_policy.min_sent_ping_interval_without_data = - gpr_time_from_millis( - grpc_channel_arg_get_integer( - &channel_args->args[i], - (grpc_integer_options){ - g_default_min_sent_ping_interval_without_data_ms, 0, - INT_MAX}), - GPR_TIMESPAN); + grpc_channel_arg_get_integer( + &channel_args->args[i], + (grpc_integer_options){ + g_default_min_sent_ping_interval_without_data_ms, 0, + INT_MAX}); } else if (0 == strcmp( channel_args->args[i].key, GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) { t->ping_policy.min_recv_ping_interval_without_data = - gpr_time_from_millis( - grpc_channel_arg_get_integer( - &channel_args->args[i], - (grpc_integer_options){ - g_default_min_recv_ping_interval_without_data_ms, 0, - INT_MAX}), - GPR_TIMESPAN); + grpc_channel_arg_get_integer( + &channel_args->args[i], + (grpc_integer_options){ + g_default_min_recv_ping_interval_without_data_ms, 0, + INT_MAX}); } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE)) { t->write_buffer_size = (uint32_t)grpc_channel_arg_get_integer( @@ -481,9 +456,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, ? g_default_client_keepalive_time_ms : g_default_server_keepalive_time_ms, 1, INT_MAX}); - t->keepalive_time = value == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(value, GPR_TIMESPAN); + t->keepalive_time = value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value; } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_KEEPALIVE_TIMEOUT_MS)) { const int value = grpc_channel_arg_get_integer( @@ -492,9 +465,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, ? g_default_client_keepalive_timeout_ms : g_default_server_keepalive_timeout_ms, 0, INT_MAX}); - t->keepalive_timeout = value == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(value, GPR_TIMESPAN); + t->keepalive_timeout = + value == INT_MAX ? 
GRPC_MILLIS_INF_FUTURE : value; } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)) { t->keepalive_permit_without_calls = @@ -574,23 +546,26 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, t->ping_state.pings_before_data_required = 0; t->ping_state.is_delayed_ping_timer_set = false; - t->ping_recv_state.last_ping_recv_time = gpr_inf_past(GPR_CLOCK_MONOTONIC); + t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST; t->ping_recv_state.ping_strikes = 0; /* Start keepalive pings */ - if (gpr_time_cmp(t->keepalive_time, gpr_inf_future(GPR_TIMESPAN)) != 0) { + if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) { t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING; GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping"); - grpc_timer_init( - exec_ctx, &t->keepalive_ping_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time), - &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(exec_ctx, &t->keepalive_ping_timer, + grpc_exec_ctx_now(exec_ctx) + t->keepalive_time, + &t->init_keepalive_ping_locked); } else { /* Use GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED to indicate there are no inflight keeaplive timers */ t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED; } + grpc_chttp2_act_on_flowctl_action( + exec_ctx, grpc_chttp2_flowctl_get_action(&t->flow_control, NULL), t, + NULL); + grpc_chttp2_initiate_write(exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE); post_benign_reclaimer(exec_ctx, t); @@ -701,7 +676,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1], arena); grpc_chttp2_data_parser_init(&s->data_parser); grpc_slice_buffer_init(&s->flow_controlled_buffer); - s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + s->deadline = GRPC_MILLIS_INF_FUTURE; GRPC_CLOSURE_INIT(&s->complete_fetch_locked, complete_fetch_locked, s, grpc_schedule_on_exec_ctx); grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer); @@ -908,9 +883,6 @@ static void inc_initiate_write_reason( case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS: GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx); break; - case GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING(exec_ctx); - break; case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING: GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING( exec_ctx); @@ -1048,6 +1020,7 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt, write_action, t, scheduler), GRPC_ERROR_NONE); } else { + GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx); set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE, "begin writing nothing"); GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing"); @@ -1146,14 +1119,12 @@ void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx, gpr_log(GPR_ERROR, "Received a GOAWAY with error code ENHANCE_YOUR_CALM and debug " "data equal to \"too_many_pings\""); - double current_keepalive_time_ms = - gpr_timespec_to_micros(t->keepalive_time) / 1000; + double current_keepalive_time_ms = (double)t->keepalive_time; t->keepalive_time = current_keepalive_time_ms > INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis((int64_t)(current_keepalive_time_ms * - KEEPALIVE_TIME_BACKOFF_MULTIPLIER), - GPR_TIMESPAN); + ? 
GRPC_MILLIS_INF_FUTURE + : (grpc_millis)(current_keepalive_time_ms * + KEEPALIVE_TIME_BACKOFF_MULTIPLIER); } /* lie: use transient failure from the transport to indicate goaway has been @@ -1465,8 +1436,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, t->settings[GRPC_PEER_SETTINGS] [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE]; if (t->is_client) { - s->deadline = - gpr_time_min(s->deadline, s->send_initial_metadata->deadline); + s->deadline = GPR_MIN(s->deadline, s->send_initial_metadata->deadline); } if (metadata_size > metadata_peer_limit) { grpc_chttp2_cancel_stream( @@ -1684,16 +1654,14 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, if (!t->is_client) { if (op->send_initial_metadata) { - gpr_timespec deadline = + grpc_millis deadline = op->payload->send_initial_metadata.send_initial_metadata->deadline; - GPR_ASSERT(0 == - gpr_time_cmp(gpr_inf_future(deadline.clock_type), deadline)); + GPR_ASSERT(deadline == GRPC_MILLIS_INF_FUTURE); } if (op->send_trailing_metadata) { - gpr_timespec deadline = + grpc_millis deadline = op->payload->send_trailing_metadata.send_trailing_metadata->deadline; - GPR_ASSERT(0 == - gpr_time_cmp(gpr_inf_future(deadline.clock_type), deadline)); + GPR_ASSERT(deadline == GRPC_MILLIS_INF_FUTURE); } } @@ -1717,28 +1685,21 @@ static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_error *error) { /* callback remaining pings: they're not allowed to call into the transpot, and maybe they hold resources that need to be freed */ - for (size_t i = 0; i < GRPC_CHTTP2_PING_TYPE_COUNT; i++) { - grpc_chttp2_ping_queue *pq = &t->ping_queues[i]; - for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) { - grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error)); - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[j]); - } + grpc_chttp2_ping_queue *pq = &t->ping_queue; + for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) { + grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error)); + GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[j]); } GRPC_ERROR_UNREF(error); } -static void send_ping_locked( - grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_chttp2_ping_type ping_type, grpc_closure *on_initiate, - grpc_closure *on_ack, - grpc_chttp2_initiate_write_reason initiate_write_reason) { - grpc_chttp2_ping_queue *pq = &t->ping_queues[ping_type]; +static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, + grpc_closure *on_initiate, grpc_closure *on_ack) { + grpc_chttp2_ping_queue *pq = &t->ping_queue; grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INITIATE], on_initiate, GRPC_ERROR_NONE); - if (grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack, - GRPC_ERROR_NONE)) { - grpc_chttp2_initiate_write(exec_ctx, t, initiate_write_reason); - } + grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack, + GRPC_ERROR_NONE); } static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp, @@ -1753,8 +1714,7 @@ static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp, void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, uint64_t id) { - grpc_chttp2_ping_queue *pq = - &t->ping_queues[id % GRPC_CHTTP2_PING_TYPE_COUNT]; + grpc_chttp2_ping_queue *pq = &t->ping_queue; if (pq->inflight_id != id) { char *from = grpc_endpoint_get_peer(t->ep); gpr_log(GPR_DEBUG, "Unknown ping response from %s: %" PRIx64, from, id); @@ -1773,8 +1733,8 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport 
*t, t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED; grpc_http2_error_code http_error; grpc_slice slice; - grpc_error_get_status(error, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL, - &slice, &http_error); + grpc_error_get_status(exec_ctx, error, GRPC_MILLIS_INF_FUTURE, NULL, &slice, + &http_error); grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)http_error, grpc_slice_ref_internal(slice), &t->qbuf); grpc_chttp2_initiate_write(exec_ctx, t, @@ -1784,7 +1744,7 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) { - gpr_log(GPR_DEBUG, "PING strike"); + t->ping_recv_state.ping_strikes++; if (++t->ping_recv_state.ping_strikes > t->ping_policy.max_ping_strikes && t->ping_policy.max_ping_strikes != 0) { send_goaway(exec_ctx, t, @@ -1824,9 +1784,9 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx, } if (op->send_ping) { - send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, NULL, - op->send_ping, - GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING); + send_ping_locked(exec_ctx, t, NULL, op->send_ping); + grpc_chttp2_initiate_write(exec_ctx, t, + GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING); } if (op->on_connectivity_state_change != NULL) { @@ -2067,7 +2027,8 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx, if (!s->read_closed || !s->write_closed) { if (s->id != 0) { grpc_http2_error_code http_error; - grpc_error_get_status(due_to_error, s->deadline, NULL, NULL, &http_error); + grpc_error_get_status(exec_ctx, due_to_error, s->deadline, NULL, NULL, + &http_error); grpc_slice_buffer_add( &t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error, &s->stats.outgoing)); @@ -2085,7 +2046,7 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s, grpc_error *error) { grpc_status_code status; grpc_slice slice; - grpc_error_get_status(error, s->deadline, &status, &slice, NULL); + grpc_error_get_status(exec_ctx, error, s->deadline, &status, &slice, NULL); if (status != GRPC_STATUS_OK) { s->seen_error = true; @@ -2250,7 +2211,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, uint32_t len = 0; grpc_status_code grpc_status; grpc_slice slice; - grpc_error_get_status(error, s->deadline, &grpc_status, &slice, NULL); + grpc_error_get_status(exec_ctx, error, s->deadline, &grpc_status, &slice, + NULL); GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100); @@ -2467,10 +2429,8 @@ void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx, if (action.need_ping) { GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping"); grpc_bdp_estimator_schedule_ping(&t->flow_control.bdp_estimator); - send_ping_locked(exec_ctx, t, - GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE, - &t->start_bdp_ping_locked, &t->finish_bdp_ping_locked, - GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING); + send_ping_locked(exec_ctx, t, &t->start_bdp_ping_locked, + &t->finish_bdp_ping_locked); } } @@ -2578,7 +2538,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp, grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->read_action_locked); grpc_chttp2_act_on_flowctl_action( - exec_ctx, grpc_chttp2_flowctl_get_bdp_action(&t->flow_control), t, + exec_ctx, grpc_chttp2_flowctl_get_action(&t->flow_control, NULL), t, NULL); GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading"); } else { @@ -2695,24 +2655,22 @@ static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg, 
grpc_chttp2_stream_map_size(&t->stream_map) > 0) { t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_PINGING; GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive ping end"); - send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, - &t->start_keepalive_ping_locked, - &t->finish_keepalive_ping_locked, - GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING); + send_ping_locked(exec_ctx, t, &t->start_keepalive_ping_locked, + &t->finish_keepalive_ping_locked); + grpc_chttp2_initiate_write(exec_ctx, t, + GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING); } else { GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping"); - grpc_timer_init( - exec_ctx, &t->keepalive_ping_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time), - &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(exec_ctx, &t->keepalive_ping_timer, + grpc_exec_ctx_now(exec_ctx) + t->keepalive_time, + &t->init_keepalive_ping_locked); } } else if (error == GRPC_ERROR_CANCELLED) { /* The keepalive ping timer may be cancelled by bdp */ GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping"); - grpc_timer_init( - exec_ctx, &t->keepalive_ping_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time), - &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(exec_ctx, &t->keepalive_ping_timer, + grpc_exec_ctx_now(exec_ctx) + t->keepalive_time, + &t->init_keepalive_ping_locked); } GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "init keepalive ping"); } @@ -2721,10 +2679,9 @@ static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg; GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog"); - grpc_timer_init( - exec_ctx, &t->keepalive_watchdog_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_timeout), - &t->keepalive_watchdog_fired_locked, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(exec_ctx, &t->keepalive_watchdog_timer, + grpc_exec_ctx_now(exec_ctx) + t->keepalive_time, + &t->keepalive_watchdog_fired_locked); } static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg, @@ -2735,10 +2692,9 @@ static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg, t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING; grpc_timer_cancel(exec_ctx, &t->keepalive_watchdog_timer); GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping"); - grpc_timer_init( - exec_ctx, &t->keepalive_ping_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time), - &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(exec_ctx, &t->keepalive_ping_timer, + grpc_exec_ctx_now(exec_ctx) + t->keepalive_time, + &t->init_keepalive_ping_locked); } } GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keepalive ping end"); @@ -3189,8 +3145,6 @@ const char *grpc_chttp2_initiate_write_reason_string( return "TRANSPORT_FLOW_CONTROL"; case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS: return "SEND_SETTINGS"; - case GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING: - return "BDP_ESTIMATOR_PING"; case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING: return "FLOW_CONTROL_UNSTALLED_BY_SETTING"; case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE: diff --git a/src/core/ext/transport/chttp2/transport/flow_control.c b/src/core/ext/transport/chttp2/transport/flow_control.c index 569a6349d3..aa1dd2d3cb 100644 --- a/src/core/ext/transport/chttp2/transport/flow_control.c +++ b/src/core/ext/transport/chttp2/transport/flow_control.c @@ -175,11 +175,9 @@ static void 
trace_action(grpc_chttp2_transport_flowctl* tfc, /* How many bytes of incoming flow control would we like to advertise */ static uint32_t grpc_chttp2_target_announced_window( const grpc_chttp2_transport_flowctl* tfc) { - return (uint32_t)GPR_MIN( - (int64_t)((1u << 31) - 1), - tfc->announced_stream_total_over_incoming_window + - tfc->t->settings[GRPC_SENT_SETTINGS] - [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]); + return (uint32_t)GPR_MIN((int64_t)((1u << 31) - 1), + tfc->announced_stream_total_over_incoming_window + + tfc->target_initial_window_size); } // we have sent data on the wire, we must track this in our bookkeeping for the @@ -281,13 +279,14 @@ grpc_error* grpc_chttp2_flowctl_recv_data(grpc_chttp2_transport_flowctl* tfc, // Returns a non zero announce integer if we should send a transport window // update uint32_t grpc_chttp2_flowctl_maybe_send_transport_update( - grpc_chttp2_transport_flowctl* tfc) { + grpc_chttp2_transport_flowctl* tfc, bool writing_anyway) { PRETRACE(tfc, NULL); uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc); uint32_t threshold_to_send_transport_window_update = tfc->t->outbuf.count > 0 ? 3 * target_announced_window / 4 : target_announced_window / 2; - if (tfc->announced_window <= threshold_to_send_transport_window_update && + if ((writing_anyway || + tfc->announced_window <= threshold_to_send_transport_window_update) && tfc->announced_window != target_announced_window) { uint32_t announce = (uint32_t)GPR_CLAMP( target_announced_window - tfc->announced_window, 0, UINT32_MAX); @@ -394,8 +393,22 @@ static grpc_chttp2_flowctl_urgency delta_is_significant( // guess at the new bdp. static double get_pid_controller_guess(grpc_chttp2_transport_flowctl* tfc, double target) { - double bdp_error = target - grpc_pid_controller_last(&tfc->pid_controller); gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); + if (!tfc->pid_controller_initialized) { + tfc->last_pid_update = now; + tfc->pid_controller_initialized = true; + grpc_pid_controller_init( + &tfc->pid_controller, + (grpc_pid_controller_args){.gain_p = 4, + .gain_i = 8, + .gain_d = 0, + .initial_control_value = target, + .min_control_value = -1, + .max_control_value = 25, + .integral_range = 10}); + return pow(2, target); + } + double bdp_error = target - grpc_pid_controller_last(&tfc->pid_controller); gpr_timespec dt_timespec = gpr_time_sub(now, tfc->last_pid_update); double dt = (double)dt_timespec.tv_sec + dt_timespec.tv_nsec * 1e-9; if (dt > 0.1) { @@ -413,8 +426,16 @@ static double get_target_under_memory_pressure( // do not increase window under heavy memory pressure. 
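Aside: the memory-pressure shaping added in the lines that follow is a piecewise adjustment of the log2-scale BDP target. A self-contained sketch, with the constants copied from the hunk and gRPC's resource-quota plumbing replaced by plain parameters:

/* Interpolates the target toward kZeroTarget as pressure drops below
   kLowMemPressure, and scales it down linearly between kHighMemPressure
   and kMaxMemPressure.  'target' is the log2-scale window target computed
   from the BDP estimate. */
static double shape_target_for_memory_pressure(double target,
                                               double memory_pressure) {
  const double kLowMemPressure = 0.1;
  const double kZeroTarget = 22;
  const double kHighMemPressure = 0.8;
  const double kMaxMemPressure = 0.9;
  if (memory_pressure < kLowMemPressure && target < kZeroTarget) {
    target = (target - kZeroTarget) * memory_pressure / kLowMemPressure +
             kZeroTarget;
  } else if (memory_pressure > kHighMemPressure) {
    double frac = (memory_pressure - kHighMemPressure) /
                  (kMaxMemPressure - kHighMemPressure);
    if (frac > 1) frac = 1;
    target *= 1 - frac;
  }
  return target;
}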
double memory_pressure = grpc_resource_quota_get_memory_pressure( grpc_resource_user_quota(grpc_endpoint_get_resource_user(tfc->t->ep))); - if (memory_pressure > 0.8) { - target *= 1 - GPR_MIN(1, (memory_pressure - 0.8) / 0.1); + static const double kLowMemPressure = 0.1; + static const double kZeroTarget = 22; + static const double kHighMemPressure = 0.8; + static const double kMaxMemPressure = 0.9; + if (memory_pressure < kLowMemPressure && target < kZeroTarget) { + target = (target - kZeroTarget) * memory_pressure / kLowMemPressure + + kZeroTarget; + } else if (memory_pressure > kHighMemPressure) { + target *= 1 - GPR_MIN(1, (memory_pressure - kHighMemPressure) / + (kMaxMemPressure - kHighMemPressure)); } return target; } @@ -423,10 +444,6 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action( grpc_chttp2_transport_flowctl* tfc, grpc_chttp2_stream_flowctl* sfc) { grpc_chttp2_flowctl_action action; memset(&action, 0, sizeof(action)); - uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc); - if (tfc->announced_window < target_announced_window / 2) { - action.send_transport_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY; - } // TODO(ncteisen): tune this if (sfc != NULL && !sfc->s->read_closed) { uint32_t sent_init_window = @@ -441,20 +458,11 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action( action.send_stream_update = GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE; } } - TRACEACTION(tfc, action); - return action; -} - -grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action( - grpc_chttp2_transport_flowctl* tfc) { - grpc_chttp2_flowctl_action action; - memset(&action, 0, sizeof(action)); if (tfc->enable_bdp_probe) { action.need_ping = grpc_bdp_estimator_need_ping(&tfc->bdp_estimator); // get bdp estimate and update initial_window accordingly. int64_t estimate = -1; - int32_t bdp = -1; if (grpc_bdp_estimator_get_estimate(&tfc->bdp_estimator, &estimate)) { double target = 1 + log2((double)estimate); @@ -468,14 +476,15 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action( double bdp_guess = get_pid_controller_guess(tfc, target); // Though initial window 'could' drop to 0, we keep the floor at 128 - bdp = GPR_MAX((int32_t)bdp_guess, 128); + tfc->target_initial_window_size = + (int32_t)GPR_CLAMP(bdp_guess, 128, INT32_MAX); grpc_chttp2_flowctl_urgency init_window_update_urgency = - delta_is_significant(tfc, bdp, + delta_is_significant(tfc, tfc->target_initial_window_size, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE); if (init_window_update_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) { action.send_setting_update = init_window_update_urgency; - action.initial_window_size = (uint32_t)bdp; + action.initial_window_size = (uint32_t)tfc->target_initial_window_size; } } @@ -484,8 +493,9 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action( if (grpc_bdp_estimator_get_bw(&tfc->bdp_estimator, &bw_dbl)) { // we target the max of BDP or bandwidth in microseconds. 
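Aside: the max-frame-size heuristic in the lines that follow now clamps against the transport's target_initial_window_size instead of a local bdp variable. Roughly, under stand-in names:

#include <limits.h>
#include <stdint.h>

/* candidate = max(scaled bandwidth estimate, window target), clamped to
   HTTP/2's valid frame-size range [16384, 16777215].  bw_dbl is the raw
   bandwidth estimate; window stands in for target_initial_window_size. */
static int32_t pick_max_frame_size(double bw_dbl, int32_t window) {
  const double bw = bw_dbl < 0 ? 0 : (bw_dbl > INT_MAX ? INT_MAX : bw_dbl);
  int32_t candidate = (int32_t)bw / 1000;
  if (candidate < window) candidate = window;
  if (candidate < 16384) candidate = 16384;
  if (candidate > 16777215) candidate = 16777215;
  return candidate;
}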
int32_t frame_size = (int32_t)GPR_CLAMP( - GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000, bdp), 16384, - 16777215); + GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000, + tfc->target_initial_window_size), + 16384, 16777215); grpc_chttp2_flowctl_urgency frame_size_urgency = delta_is_significant( tfc, frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE); if (frame_size_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) { @@ -496,7 +506,10 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action( } } } - + uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc); + if (tfc->announced_window < target_announced_window / 2) { + action.send_transport_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY; + } TRACEACTION(tfc, action); return action; } diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.c b/src/core/ext/transport/chttp2/transport/frame_ping.c index d431d6b2df..1cfa883ee1 100644 --- a/src/core/ext/transport/chttp2/transport/frame_ping.c +++ b/src/core/ext/transport/chttp2/transport/frame_ping.c @@ -89,10 +89,10 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_ack_ping(exec_ctx, t, p->opaque_8bytes); } else { if (!t->is_client) { - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec next_allowed_ping = - gpr_time_add(t->ping_recv_state.last_ping_recv_time, - t->ping_policy.min_recv_ping_interval_without_data); + grpc_millis now = grpc_exec_ctx_now(exec_ctx); + grpc_millis next_allowed_ping = + t->ping_recv_state.last_ping_recv_time + + t->ping_policy.min_recv_ping_interval_without_data; if (t->keepalive_permit_without_calls == 0 && grpc_chttp2_stream_map_size(&t->stream_map) == 0) { @@ -100,11 +100,10 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser, no less than two hours. When there is no outstanding streams, we restrict the number of PINGS equivalent to TCP Keep-Alive. 
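Aside: the server-side check completed in the next lines reduces to a simple millisecond comparison. A minimal standalone sketch (the 7200-second floor mirrors the TCP keep-alive restriction described in the comment; field accesses are replaced by plain parameters):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Returns true when a received PING arrived too soon and should count as
   a ping strike.  All times are millisecond counts, as with grpc_millis. */
static bool ping_is_too_soon(int64_t last_ping_recv_ms, int64_t now_ms,
                             int64_t min_recv_interval_ms,
                             bool keepalive_permit_without_calls,
                             size_t open_streams) {
  int64_t next_allowed_ping = last_ping_recv_ms + min_recv_interval_ms;
  if (!keepalive_permit_without_calls && open_streams == 0) {
    /* no outstanding streams: fall back to the two-hour floor */
    next_allowed_ping = last_ping_recv_ms + 7200 * (int64_t)1000;
  }
  return next_allowed_ping > now_ms;
}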
*/ next_allowed_ping = - gpr_time_add(t->ping_recv_state.last_ping_recv_time, - gpr_time_from_seconds(7200, GPR_TIMESPAN)); + t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC; } - if (gpr_time_cmp(next_allowed_ping, now) > 0) { + if (next_allowed_ping > now) { grpc_chttp2_add_ping_strike(exec_ctx, t); } diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.c b/src/core/ext/transport/chttp2/transport/hpack_encoder.c index 9dfbd3c3a9..3482d2c6f6 100644 --- a/src/core/ext/transport/chttp2/transport/hpack_encoder.c +++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.c @@ -514,12 +514,12 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c, #define TIMEOUT_KEY "grpc-timeout" static void deadline_enc(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_compressor *c, gpr_timespec deadline, + grpc_chttp2_hpack_compressor *c, grpc_millis deadline, framer_state *st) { char timeout_str[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE]; grpc_mdelem mdelem; - grpc_http2_encode_timeout( - gpr_time_sub(deadline, gpr_now(deadline.clock_type)), timeout_str); + grpc_http2_encode_timeout(deadline - grpc_exec_ctx_now(exec_ctx), + timeout_str); mdelem = grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_TIMEOUT, grpc_slice_from_copied_string(timeout_str)); hpack_enc(exec_ctx, c, mdelem, st); @@ -639,8 +639,8 @@ void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx, for (grpc_linked_mdelem *l = metadata->list.head; l; l = l->next) { hpack_enc(exec_ctx, c, l->md, &st); } - gpr_timespec deadline = metadata->deadline; - if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) { + grpc_millis deadline = metadata->deadline; + if (deadline != GRPC_MILLIS_INF_FUTURE) { deadline_enc(exec_ctx, c, deadline, &st); } diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.c b/src/core/ext/transport/chttp2/transport/incoming_metadata.c index ba680a89db..187ce0ea87 100644 --- a/src/core/ext/transport/chttp2/transport/incoming_metadata.c +++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.c @@ -29,7 +29,7 @@ void grpc_chttp2_incoming_metadata_buffer_init( grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena) { buffer->arena = arena; grpc_metadata_batch_init(&buffer->batch); - buffer->batch.deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + buffer->batch.deadline = GRPC_MILLIS_INF_FUTURE; } void grpc_chttp2_incoming_metadata_buffer_destroy( @@ -62,7 +62,7 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add( } void grpc_chttp2_incoming_metadata_buffer_set_deadline( - grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline) { + grpc_chttp2_incoming_metadata_buffer *buffer, grpc_millis deadline) { buffer->batch.deadline = deadline; } diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.h b/src/core/ext/transport/chttp2/transport/incoming_metadata.h index a951d8764c..0fc90b2d80 100644 --- a/src/core/ext/transport/chttp2/transport/incoming_metadata.h +++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.h @@ -43,6 +43,6 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add( grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem elem) GRPC_MUST_USE_RESULT; void grpc_chttp2_incoming_metadata_buffer_set_deadline( - grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline); + grpc_chttp2_incoming_metadata_buffer *buffer, grpc_millis deadline); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INCOMING_METADATA_H */ diff --git 
a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h index 1682be28dd..b5850564e0 100644 --- a/src/core/ext/transport/chttp2/transport/internal.h +++ b/src/core/ext/transport/chttp2/transport/internal.h @@ -62,12 +62,6 @@ typedef enum { } grpc_chttp2_write_state; typedef enum { - GRPC_CHTTP2_PING_ON_NEXT_WRITE = 0, - GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE, - GRPC_CHTTP2_PING_TYPE_COUNT /* must be last */ -} grpc_chttp2_ping_type; - -typedef enum { GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY, GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT, } grpc_chttp2_optimization_target; @@ -93,7 +87,6 @@ typedef enum { GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL, GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL, GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS, - GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING, GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING, GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE, GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING, @@ -114,19 +107,19 @@ typedef struct { typedef struct { int max_pings_without_data; int max_ping_strikes; - gpr_timespec min_sent_ping_interval_without_data; - gpr_timespec min_recv_ping_interval_without_data; + grpc_millis min_sent_ping_interval_without_data; + grpc_millis min_recv_ping_interval_without_data; } grpc_chttp2_repeated_ping_policy; typedef struct { - gpr_timespec last_ping_sent_time; + grpc_millis last_ping_sent_time; int pings_before_data_required; grpc_timer delayed_ping_timer; bool is_delayed_ping_timer_set; } grpc_chttp2_repeated_ping_state; typedef struct { - gpr_timespec last_ping_recv_time; + grpc_millis last_ping_recv_time; int ping_strikes; } grpc_chttp2_server_ping_recv_state; @@ -265,6 +258,8 @@ typedef struct { * to send WINDOW_UPDATE frames. */ int64_t announced_window; + int32_t target_initial_window_size; + /** should we probe bdp? 
*/ bool enable_bdp_probe; @@ -272,6 +267,7 @@ typedef struct { grpc_bdp_estimator bdp_estimator; /* pid controller */ + bool pid_controller_initialized; grpc_pid_controller pid_controller; gpr_timespec last_pid_update; @@ -370,7 +366,7 @@ struct grpc_chttp2_transport { uint32_t last_new_stream_id; /** ping queues for various ping insertion points */ - grpc_chttp2_ping_queue ping_queues[GRPC_CHTTP2_PING_TYPE_COUNT]; + grpc_chttp2_ping_queue ping_queue; grpc_chttp2_repeated_ping_policy ping_policy; grpc_chttp2_repeated_ping_state ping_state; uint64_t ping_ctr; /* unique id for pings */ @@ -455,9 +451,9 @@ struct grpc_chttp2_transport { /** watchdog to kill the transport when waiting for the keepalive ping */ grpc_timer keepalive_watchdog_timer; /** time duration in between pings */ - gpr_timespec keepalive_time; + grpc_millis keepalive_time; /** grace period for a ping to complete before watchdog kicks in */ - gpr_timespec keepalive_timeout; + grpc_millis keepalive_timeout; /** if keepalive pings are allowed when there's no outstanding streams */ bool keepalive_permit_without_calls; /** keep-alive state machine state */ @@ -566,7 +562,7 @@ struct grpc_chttp2_stream { grpc_error *byte_stream_error; /* protected by t combiner */ bool received_last_frame; /* protected by t combiner */ - gpr_timespec deadline; + grpc_millis deadline; /** saw some stream level error */ grpc_error *forced_close_error; @@ -705,7 +701,7 @@ grpc_error *grpc_chttp2_flowctl_recv_data(grpc_chttp2_transport_flowctl *tfc, // returns an announce if we should send a transport update to our peer, // else returns zero uint32_t grpc_chttp2_flowctl_maybe_send_transport_update( - grpc_chttp2_transport_flowctl *tfc); + grpc_chttp2_transport_flowctl *tfc, bool writing_anyway); // returns an announce if we should send a stream update to our peer, else // returns zero @@ -754,9 +750,6 @@ typedef struct { grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action( grpc_chttp2_transport_flowctl *tfc, grpc_chttp2_stream_flowctl *sfc); -grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action( - grpc_chttp2_transport_flowctl *tfc); - // Takes in a flow control action and performs all the needed operations. 
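Aside: one consequence of collapsing the per-type ping queues above into a single ping_queue is that ping ids no longer have to encode which queue they belong to. A hypothetical stand-in illustrating the simplified bookkeeping (these structs are local to the example, not the transport's):

#include <stdint.h>

typedef struct {
  uint64_t inflight_id; /* id of the ping currently awaiting an ack */
} ping_queue;

typedef struct {
  ping_queue queue; /* was: queues[GRPC_CHTTP2_PING_TYPE_COUNT] */
  uint64_t ping_ctr;
} transport_pings;

static uint64_t issue_ping(transport_pings *t) {
  /* was: inflight_id = ping_ctr * GRPC_CHTTP2_PING_TYPE_COUNT + ping_type */
  t->queue.inflight_id = t->ping_ctr;
  t->ping_ctr++;
  return t->queue.inflight_id;
}

static int ack_matches(const transport_pings *t, uint64_t id) {
  /* was: pq = &t->ping_queues[id % GRPC_CHTTP2_PING_TYPE_COUNT] */
  return t->queue.inflight_id == id;
}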
void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx, grpc_chttp2_flowctl_action action, diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c index 3db1ad4123..2086f42cb2 100644 --- a/src/core/ext/transport/chttp2/transport/parsing.c +++ b/src/core/ext/transport/chttp2/transport/parsing.c @@ -385,7 +385,7 @@ error_handler: t->parser_data = &s->data_parser; t->ping_state.pings_before_data_required = t->ping_policy.max_pings_without_data; - t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC); + t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST; return GRPC_ERROR_NONE; } else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) { /* handle stream errors by closing the stream */ @@ -430,26 +430,26 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp, } if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_TIMEOUT)) { - gpr_timespec *cached_timeout = - (gpr_timespec *)grpc_mdelem_get_user_data(md, free_timeout); - gpr_timespec timeout; + grpc_millis *cached_timeout = grpc_mdelem_get_user_data(md, free_timeout); + grpc_millis timeout; if (cached_timeout == NULL) { /* not already parsed: parse it now, and store the result away */ - cached_timeout = (gpr_timespec *)gpr_malloc(sizeof(gpr_timespec)); + cached_timeout = (grpc_millis *)gpr_malloc(sizeof(grpc_millis)); if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), cached_timeout)) { char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md)); gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", val); gpr_free(val); - *cached_timeout = gpr_inf_future(GPR_TIMESPAN); + *cached_timeout = GRPC_MILLIS_INF_FUTURE; } timeout = *cached_timeout; grpc_mdelem_set_user_data(md, free_timeout, cached_timeout); } else { timeout = *cached_timeout; } - grpc_chttp2_incoming_metadata_buffer_set_deadline( - &s->metadata_buffer[0], - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), timeout)); + if (timeout != GRPC_MILLIS_INF_FUTURE) { + grpc_chttp2_incoming_metadata_buffer_set_deadline( + &s->metadata_buffer[0], grpc_exec_ctx_now(exec_ctx) + timeout); + } GRPC_MDELEM_UNREF(exec_ctx, md); } else { const size_t new_size = s->metadata_buffer[0].size + GRPC_MDELEM_LENGTH(md); @@ -564,7 +564,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, t->ping_state.pings_before_data_required = t->ping_policy.max_pings_without_data; - t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC); + t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST; /* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */ s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id); diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index 7d9d4867e1..2f6228847b 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -42,18 +42,9 @@ static void finish_write_cb(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, t->write_cb_pool = cb; } -static void collapse_pings_from_into(grpc_chttp2_transport *t, - grpc_chttp2_ping_type ping_type, - grpc_chttp2_ping_queue *pq) { - for (size_t i = 0; i < GRPC_CHTTP2_PCL_COUNT; i++) { - grpc_closure_list_move(&t->ping_queues[ping_type].lists[i], &pq->lists[i]); - } -} - static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_ping_type ping_type) { - grpc_chttp2_ping_queue *pq = &t->ping_queues[ping_type]; + grpc_chttp2_transport *t) { + 
grpc_chttp2_ping_queue *pq = &t->ping_queue; if (grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) { /* no ping needed: wait */ return; @@ -62,7 +53,8 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx, /* ping already in-flight: wait */ if (GRPC_TRACER_ON(grpc_http_trace) || GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { - gpr_log(GPR_DEBUG, "Ping delayed [%p]: already pinging", t->peer_string); + gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: already pinging", + t->is_client ? "CLIENT" : "SERVER", t->peer_string); } return; } @@ -71,51 +63,38 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx, /* need to receive something of substance before sending a ping again */ if (GRPC_TRACER_ON(grpc_http_trace) || GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { - gpr_log(GPR_DEBUG, "Ping delayed [%p]: too many recent pings: %d/%d", - t->peer_string, t->ping_state.pings_before_data_required, + gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: too many recent pings: %d/%d", + t->is_client ? "CLIENT" : "SERVER", t->peer_string, + t->ping_state.pings_before_data_required, t->ping_policy.max_pings_without_data); } return; } - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec next_allowed_ping = - gpr_time_add(t->ping_state.last_ping_sent_time, - t->ping_policy.min_sent_ping_interval_without_data); + grpc_millis now = grpc_exec_ctx_now(exec_ctx); + grpc_millis next_allowed_ping = + t->ping_state.last_ping_sent_time + + t->ping_policy.min_sent_ping_interval_without_data; if (t->keepalive_permit_without_calls == 0 && grpc_chttp2_stream_map_size(&t->stream_map) == 0) { - next_allowed_ping = gpr_time_add(t->ping_recv_state.last_ping_recv_time, - gpr_time_from_seconds(7200, GPR_TIMESPAN)); + next_allowed_ping = + t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC; } - /* gpr_log(GPR_DEBUG, "next_allowed_ping:%d.%09d now:%d.%09d", - (int)next_allowed_ping.tv_sec, (int)next_allowed_ping.tv_nsec, - (int)now.tv_sec, (int)now.tv_nsec); */ - if (gpr_time_cmp(next_allowed_ping, now) > 0) { + if (next_allowed_ping > now) { /* not enough elapsed time between successive pings */ if (GRPC_TRACER_ON(grpc_http_trace) || GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { gpr_log(GPR_DEBUG, - "Ping delayed [%p]: not enough time elapsed since last ping", - t->peer_string); + "%s: Ping delayed [%p]: not enough time elapsed since last ping", + t->is_client ? 
"CLIENT" : "SERVER", t->peer_string); } if (!t->ping_state.is_delayed_ping_timer_set) { t->ping_state.is_delayed_ping_timer_set = true; grpc_timer_init(exec_ctx, &t->ping_state.delayed_ping_timer, - next_allowed_ping, &t->retry_initiate_ping_locked, - gpr_now(GPR_CLOCK_MONOTONIC)); + next_allowed_ping, &t->retry_initiate_ping_locked); } return; } - /* coalesce equivalent pings into this one */ - switch (ping_type) { - case GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE: - collapse_pings_from_into(t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, pq); - break; - case GRPC_CHTTP2_PING_ON_NEXT_WRITE: - break; - case GRPC_CHTTP2_PING_TYPE_COUNT: - GPR_UNREACHABLE_CODE(break); - } - pq->inflight_id = t->ping_ctr * GRPC_CHTTP2_PING_TYPE_COUNT + ping_type; + pq->inflight_id = t->ping_ctr; t->ping_ctr++; GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INITIATE]); grpc_closure_list_move(&pq->lists[GRPC_CHTTP2_PCL_NEXT], @@ -126,7 +105,8 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx, t->ping_state.last_ping_sent_time = now; if (GRPC_TRACER_ON(grpc_http_trace) || GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { - gpr_log(GPR_DEBUG, "Ping sent [%p]: %d/%d", t->peer_string, + gpr_log(GPR_DEBUG, "%s: Ping sent [%p]: %d/%d", + t->is_client ? "CLIENT" : "SERVER", t->peer_string, t->ping_state.pings_before_data_required, t->ping_policy.max_pings_without_data); } @@ -156,6 +136,25 @@ static bool update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, return sched_any; } +static void report_stall(grpc_chttp2_transport *t, grpc_chttp2_stream *s, + const char *staller) { + gpr_log( + GPR_DEBUG, + "%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR ":flowed=%" PRId64 + ":peer_initwin=%d:t_win=%" PRId64 ":s_win=%d:s_delta=%" PRId64 "]", + t->peer_string, t, s->id, staller, s->flow_controlled_buffer.length, + s->flow_controlled_bytes_flowed, + t->settings[GRPC_ACKED_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE], + t->flow_control.remote_window, + (uint32_t)GPR_MAX( + 0, + s->flow_control.remote_window_delta + + (int64_t)t->settings[GRPC_PEER_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]), + s->flow_control.remote_window_delta); +} + static bool stream_ref_if_not_destroyed(gpr_refcount *r) { gpr_atm count; do { @@ -202,6 +201,12 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx); } + for (size_t i = 0; i < t->ping_ack_count; i++) { + grpc_slice_buffer_add(&t->outbuf, + grpc_chttp2_ping_create(1, t->ping_acks[i])); + } + t->ping_ack_count = 0; + /* simple writes are queued to qbuf, and flushed here */ grpc_slice_buffer_move_into(&t->qbuf, &t->outbuf); GPR_ASSERT(t->qbuf.count == 0); @@ -270,8 +275,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( s->send_initial_metadata, &hopt, &t->outbuf); now_writing = true; if (!t->is_client) { - t->ping_recv_state.last_ping_recv_time = - gpr_inf_past(GPR_CLOCK_MONOTONIC); + t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST; t->ping_recv_state.ping_strikes = 0; } initial_metadata_writes++; @@ -300,6 +304,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_NONE, "send_initial_metadata_finished"); } + /* send any window updates */ uint32_t stream_announce = grpc_chttp2_flowctl_maybe_send_stream_update( &t->flow_control, &s->flow_control); @@ -308,8 +313,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( &t->outbuf, grpc_chttp2_window_update_create(s->id, stream_announce, 
&s->stats.outgoing)); if (!t->is_client) { - t->ping_recv_state.last_ping_recv_time = - gpr_inf_past(GPR_CLOCK_MONOTONIC); + t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST; t->ping_recv_state.ping_strikes = 0; } flow_control_writes++; @@ -384,8 +388,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( s->sending_bytes += send_bytes; } if (!t->is_client) { - t->ping_recv_state.last_ping_recv_time = - gpr_inf_past(GPR_CLOCK_MONOTONIC); + t->ping_recv_state.last_ping_recv_time = 0; t->ping_recv_state.ping_strikes = 0; } if (is_last_frame) { @@ -413,9 +416,11 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( } message_writes++; } else if (t->flow_control.remote_window == 0) { + report_stall(t, s, "transport"); grpc_chttp2_list_add_stalled_by_transport(t, s); now_writing = true; } else if (stream_remote_window == 0) { + report_stall(t, s, "stream"); grpc_chttp2_list_add_stalled_by_stream(t, s); now_writing = true; } @@ -451,6 +456,10 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( } s->send_trailing_metadata = NULL; s->sent_trailing_metadata = true; + if (!t->is_client) { + t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST; + t->ping_recv_state.ping_strikes = 0; + } if (!t->is_client && !s->read_closed) { grpc_slice_buffer_add( &t->outbuf, grpc_chttp2_rst_stream_create( @@ -484,30 +493,21 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( } } - uint32_t transport_announce = - grpc_chttp2_flowctl_maybe_send_transport_update(&t->flow_control); + maybe_initiate_ping(exec_ctx, t); + + uint32_t transport_announce = grpc_chttp2_flowctl_maybe_send_transport_update( + &t->flow_control, t->outbuf.count > 0); if (transport_announce) { - maybe_initiate_ping(exec_ctx, t, - GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE); grpc_transport_one_way_stats throwaway_stats; grpc_slice_buffer_add( &t->outbuf, grpc_chttp2_window_update_create(0, transport_announce, &throwaway_stats)); if (!t->is_client) { - t->ping_recv_state.last_ping_recv_time = - gpr_inf_past(GPR_CLOCK_MONOTONIC); + t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST; t->ping_recv_state.ping_strikes = 0; } } - for (size_t i = 0; i < t->ping_ack_count; i++) { - grpc_slice_buffer_add(&t->outbuf, - grpc_chttp2_ping_create(1, t->ping_acks[i])); - } - t->ping_ack_count = 0; - - maybe_initiate_ping(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE); - GPR_TIMER_END("grpc_chttp2_begin_write", 0); result.writing = t->outbuf.count > 0; diff --git a/src/core/ext/transport/inproc/inproc_transport.c b/src/core/ext/transport/inproc/inproc_transport.c index 31739d07dd..1001d74c22 100644 --- a/src/core/ext/transport/inproc/inproc_transport.c +++ b/src/core/ext/transport/inproc/inproc_transport.c @@ -150,7 +150,7 @@ typedef struct inproc_stream { grpc_metadata_batch write_buffer_initial_md; bool write_buffer_initial_md_filled; uint32_t write_buffer_initial_md_flags; - gpr_timespec write_buffer_deadline; + grpc_millis write_buffer_deadline; slice_buffer_list write_buffer_message; grpc_metadata_batch write_buffer_trailing_md; bool write_buffer_trailing_md_filled; @@ -180,7 +180,7 @@ typedef struct inproc_stream { grpc_error *cancel_self_error; grpc_error *cancel_other_error; - gpr_timespec deadline; + grpc_millis deadline; bool listed; struct inproc_stream *stream_list_prev; @@ -377,8 +377,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, s->cancel_self_error = GRPC_ERROR_NONE; s->cancel_other_error = GRPC_ERROR_NONE; s->write_buffer_cancel_error = GRPC_ERROR_NONE; - 
s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); - s->write_buffer_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + s->deadline = GRPC_MILLIS_INF_FUTURE; + s->write_buffer_deadline = GRPC_MILLIS_INF_FUTURE; s->stream_list_prev = NULL; gpr_mu_lock(&t->mu->mu); @@ -421,7 +421,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, cs->write_buffer_initial_md_flags, &s->to_read_initial_md, &s->to_read_initial_md_flags, &s->to_read_initial_md_filled); - s->deadline = gpr_time_min(s->deadline, cs->write_buffer_deadline); + s->deadline = GPR_MIN(s->deadline, cs->write_buffer_deadline); grpc_metadata_batch_clear(exec_ctx, &cs->write_buffer_initial_md); cs->write_buffer_initial_md_filled = false; } @@ -956,10 +956,10 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, dest, destflags, destfilled); } if (s->t->is_client) { - gpr_timespec *dl = + grpc_millis *dl = (other == NULL) ? &s->write_buffer_deadline : &other->deadline; - *dl = gpr_time_min(*dl, op->payload->send_initial_metadata - .send_initial_metadata->deadline); + *dl = GPR_MIN(*dl, op->payload->send_initial_metadata + .send_initial_metadata->deadline); s->initial_md_sent = true; } } diff --git a/src/core/lib/support/backoff.c b/src/core/lib/backoff/backoff.c index 6dc0df473b..b34c4a852b 100644 --- a/src/core/lib/support/backoff.c +++ b/src/core/lib/backoff/backoff.c @@ -16,13 +16,14 @@ * */ -#include "src/core/lib/support/backoff.h" +#include "src/core/lib/backoff/backoff.h" #include <grpc/support/useful.h> -void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout, - double multiplier, double jitter, - int64_t min_timeout_millis, int64_t max_timeout_millis) { +void grpc_backoff_init(grpc_backoff *backoff, + grpc_millis initial_connect_timeout, double multiplier, + double jitter, grpc_millis min_timeout_millis, + grpc_millis max_timeout_millis) { backoff->initial_connect_timeout = initial_connect_timeout; backoff->multiplier = multiplier; backoff->jitter = jitter; @@ -31,11 +32,11 @@ void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout, backoff->rng_state = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec; } -gpr_timespec gpr_backoff_begin(gpr_backoff *backoff, gpr_timespec now) { +grpc_millis grpc_backoff_begin(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff) { backoff->current_timeout_millis = backoff->initial_connect_timeout; - const int64_t first_timeout = + const grpc_millis first_timeout = GPR_MAX(backoff->current_timeout_millis, backoff->min_timeout_millis); - return gpr_time_add(now, gpr_time_from_millis(first_timeout, GPR_TIMESPAN)); + return grpc_exec_ctx_now(exec_ctx) + first_timeout; } /* Generate a random number between 0 and 1. 
*/ @@ -44,7 +45,7 @@ static double generate_uniform_random_number(uint32_t *rng_state) { return *rng_state / (double)((uint32_t)1 << 31); } -gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now) { +grpc_millis grpc_backoff_step(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff) { const double new_timeout_millis = backoff->multiplier * (double)backoff->current_timeout_millis; backoff->current_timeout_millis = @@ -58,15 +59,15 @@ gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now) { backoff->current_timeout_millis = (int64_t)((double)(backoff->current_timeout_millis) + jitter); - const gpr_timespec current_deadline = gpr_time_add( - now, gpr_time_from_millis(backoff->current_timeout_millis, GPR_TIMESPAN)); + const grpc_millis current_deadline = + grpc_exec_ctx_now(exec_ctx) + backoff->current_timeout_millis; - const gpr_timespec min_deadline = gpr_time_add( - now, gpr_time_from_millis(backoff->min_timeout_millis, GPR_TIMESPAN)); + const grpc_millis min_deadline = + grpc_exec_ctx_now(exec_ctx) + backoff->min_timeout_millis; - return gpr_time_max(current_deadline, min_deadline); + return GPR_MAX(current_deadline, min_deadline); } -void gpr_backoff_reset(gpr_backoff *backoff) { +void grpc_backoff_reset(grpc_backoff *backoff) { backoff->current_timeout_millis = backoff->initial_connect_timeout; } diff --git a/src/core/lib/support/backoff.h b/src/core/lib/backoff/backoff.h index 6e0cc3a4b6..32efdb9d92 100644 --- a/src/core/lib/support/backoff.h +++ b/src/core/lib/backoff/backoff.h @@ -16,41 +16,43 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_BACKOFF_H -#define GRPC_CORE_LIB_SUPPORT_BACKOFF_H +#ifndef GRPC_CORE_LIB_BACKOFF_BACKOFF_H +#define GRPC_CORE_LIB_BACKOFF_BACKOFF_H -#include <grpc/support/time.h> +#include "src/core/lib/iomgr/exec_ctx.h" typedef struct { /// const: how long to wait after the first failure before retrying - int64_t initial_connect_timeout; + grpc_millis initial_connect_timeout; /// const: factor with which to multiply backoff after a failed retry double multiplier; /// const: amount to randomize backoffs double jitter; /// const: minimum time between retries in milliseconds - int64_t min_timeout_millis; + grpc_millis min_timeout_millis; /// const: maximum time between retries in milliseconds - int64_t max_timeout_millis; + grpc_millis max_timeout_millis; /// random number generator uint32_t rng_state; /// current retry timeout in milliseconds - int64_t current_timeout_millis; -} gpr_backoff; + grpc_millis current_timeout_millis; +} grpc_backoff; /// Initialize backoff machinery - does not need to be destroyed -void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout, - double multiplier, double jitter, - int64_t min_timeout_millis, int64_t max_timeout_millis); +void grpc_backoff_init(grpc_backoff *backoff, + grpc_millis initial_connect_timeout, double multiplier, + double jitter, grpc_millis min_timeout_millis, + grpc_millis max_timeout_millis); /// Begin retry loop: returns a timespec for the NEXT retry -gpr_timespec gpr_backoff_begin(gpr_backoff *backoff, gpr_timespec now); +grpc_millis grpc_backoff_begin(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff); /// Step a retry loop: returns a timespec for the NEXT retry -gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now); -/// Reset the backoff, so the next gpr_backoff_step will be a gpr_backoff_begin +grpc_millis grpc_backoff_step(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff); +/// Reset the backoff, so the next grpc_backoff_step will be a +/// grpc_backoff_begin 
/// instead -void gpr_backoff_reset(gpr_backoff *backoff); +void grpc_backoff_reset(grpc_backoff *backoff); -#endif /* GRPC_CORE_LIB_SUPPORT_BACKOFF_H */ +#endif /* GRPC_CORE_LIB_BACKOFF_BACKOFF_H */ diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h index f0de80f0c0..5c00c09889 100644 --- a/src/core/lib/channel/channel_stack.h +++ b/src/core/lib/channel/channel_stack.h @@ -70,7 +70,7 @@ typedef struct { grpc_call_context_element *context; grpc_slice path; gpr_timespec start_time; - gpr_timespec deadline; + grpc_millis deadline; gpr_arena *arena; grpc_call_combiner *call_combiner; } grpc_call_element_args; diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c index 1753da5721..b27ee37e5b 100644 --- a/src/core/lib/channel/handshaker.c +++ b/src/core/lib/channel/handshaker.c @@ -232,7 +232,7 @@ static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { void grpc_handshake_manager_do_handshake( grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr, grpc_endpoint* endpoint, const grpc_channel_args* channel_args, - gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor, + grpc_millis deadline, grpc_tcp_server_acceptor* acceptor, grpc_iomgr_cb_func on_handshake_done, void* user_data) { gpr_mu_lock(&mgr->mu); GPR_ASSERT(mgr->index == 0); @@ -255,9 +255,7 @@ void grpc_handshake_manager_do_handshake( gpr_ref(&mgr->refs); GRPC_CLOSURE_INIT(&mgr->on_timeout, on_timeout, mgr, grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &mgr->deadline_timer, - gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC), - &mgr->on_timeout, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(exec_ctx, &mgr->deadline_timer, deadline, &mgr->on_timeout); // Start first handshaker, which also owns a ref. 
gpr_ref(&mgr->refs); bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_NONE); diff --git a/src/core/lib/channel/handshaker.h b/src/core/lib/channel/handshaker.h index eb9a59bd08..09b4a27c1c 100644 --- a/src/core/lib/channel/handshaker.h +++ b/src/core/lib/channel/handshaker.h @@ -145,7 +145,7 @@ void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx, void grpc_handshake_manager_do_handshake( grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr, grpc_endpoint* endpoint, const grpc_channel_args* channel_args, - gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor, + grpc_millis deadline, grpc_tcp_server_acceptor* acceptor, grpc_iomgr_cb_func on_handshake_done, void* user_data); /// Add \a mgr to the server side list of all pending handshake managers, the diff --git a/src/core/lib/compression/stream_compression.h b/src/core/lib/compression/stream_compression.h index 844dff81a3..0daaa9e655 100644 --- a/src/core/lib/compression/stream_compression.h +++ b/src/core/lib/compression/stream_compression.h @@ -87,4 +87,4 @@ grpc_stream_compression_context *grpc_stream_compression_context_create( void grpc_stream_compression_context_destroy( grpc_stream_compression_context *ctx); -#endif +#endif /* GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_H */ diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index 8eaf308b71..546e38b289 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -77,6 +77,7 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { "http2_initiate_write_due_to_transport_flow_control_unstalled", "http2_initiate_write_due_to_ping_response", "http2_initiate_write_due_to_force_rst_stream", + "http2_spurious_writes_begun", "combiner_locks_initiated", "combiner_locks_scheduled_items", "combiner_locks_scheduled_final_items", @@ -157,6 +158,7 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = { "'transport_flow_control_unstalled'", "Number of HTTP2 writes initiated due to 'ping_response'", "Number of HTTP2 writes initiated due to 'force_rst_stream'", + "Number of HTTP2 writes initiated with nothing to write", "Number of combiner lock entries by process (first items queued to a " "combiner)", "Number of items scheduled against combiner locks", diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index 099302f29c..5358aefeb0 100644 --- a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -79,6 +79,7 @@ typedef enum { GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED, GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE, GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM, + GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN, GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED, GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS, GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS, @@ -309,6 +310,9 @@ typedef enum { GRPC_STATS_INC_COUNTER( \ (exec_ctx), \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM) +#define GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), \ + GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN) #define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), \ GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED) diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml index 2fd23ce675..8ebe321c1d 100644 --- a/src/core/lib/debug/stats_data.yaml +++ 
b/src/core/lib/debug/stats_data.yaml @@ -189,6 +189,8 @@ doc: Number of HTTP2 writes initiated due to 'ping_response' - counter: http2_initiate_write_due_to_force_rst_stream doc: Number of HTTP2 writes initiated due to 'force_rst_stream' +- counter: http2_spurious_writes_begun + doc: Number of HTTP2 writes initiated with nothing to write # combiner locks - counter: combiner_locks_initiated doc: Number of combiner lock entries by process diff --git a/src/core/lib/debug/stats_data_bq_schema.sql b/src/core/lib/debug/stats_data_bq_schema.sql index b10985e449..3a73784c7b 100644 --- a/src/core/lib/debug/stats_data_bq_schema.sql +++ b/src/core/lib/debug/stats_data_bq_schema.sql @@ -52,6 +52,7 @@ http2_initiate_write_due_to_keepalive_ping_per_iteration:FLOAT, http2_initiate_write_due_to_transport_flow_control_unstalled_per_iteration:FLOAT, http2_initiate_write_due_to_ping_response_per_iteration:FLOAT, http2_initiate_write_due_to_force_rst_stream_per_iteration:FLOAT, +http2_spurious_writes_begun_per_iteration:FLOAT, combiner_locks_initiated_per_iteration:FLOAT, combiner_locks_scheduled_items_per_iteration:FLOAT, combiner_locks_scheduled_final_items_per_iteration:FLOAT, diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c index db995943a9..c96800b85c 100644 --- a/src/core/lib/http/httpcli.c +++ b/src/core/lib/http/httpcli.c @@ -44,7 +44,7 @@ typedef struct { grpc_endpoint *ep; char *host; char *ssl_host_override; - gpr_timespec deadline; + grpc_millis deadline; int have_read_byte; const grpc_httpcli_handshaker *handshaker; grpc_closure *on_done; @@ -65,7 +65,7 @@ static grpc_httpcli_post_override g_post_override = NULL; static void plaintext_handshake(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint, const char *host, - gpr_timespec deadline, + grpc_millis deadline, void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint)) { @@ -240,7 +240,7 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx, grpc_polling_entity *pollent, grpc_resource_quota *resource_quota, const grpc_httpcli_request *request, - gpr_timespec deadline, grpc_closure *on_done, + grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response, const char *name, grpc_slice request_text) { internal_request *req = @@ -278,9 +278,8 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx, void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, grpc_polling_entity *pollent, grpc_resource_quota *resource_quota, - const grpc_httpcli_request *request, - gpr_timespec deadline, grpc_closure *on_done, - grpc_httpcli_response *response) { + const grpc_httpcli_request *request, grpc_millis deadline, + grpc_closure *on_done, grpc_httpcli_response *response) { char *name; if (g_get_override && g_get_override(exec_ctx, request, deadline, on_done, response)) { @@ -298,7 +297,7 @@ void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, grpc_resource_quota *resource_quota, const grpc_httpcli_request *request, const char *body_bytes, size_t body_size, - gpr_timespec deadline, grpc_closure *on_done, + grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { char *name; if (g_post_override && diff --git a/src/core/lib/http/httpcli.h b/src/core/lib/http/httpcli.h index 809618695e..a3baf512a3 100644 --- a/src/core/lib/http/httpcli.h +++ b/src/core/lib/http/httpcli.h @@ -42,7 +42,7 @@ typedef struct grpc_httpcli_context { typedef struct { const char *default_port; void (*handshake)(grpc_exec_ctx *exec_ctx, void *arg, 
grpc_endpoint *endpoint, - const char *host, gpr_timespec deadline, + const char *host, grpc_millis deadline, void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint)); } grpc_httpcli_handshaker; @@ -83,8 +83,8 @@ void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx, void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, grpc_polling_entity *pollent, grpc_resource_quota *resource_quota, - const grpc_httpcli_request *request, - gpr_timespec deadline, grpc_closure *on_complete, + const grpc_httpcli_request *request, grpc_millis deadline, + grpc_closure *on_complete, grpc_httpcli_response *response); /* Asynchronously perform a HTTP POST. @@ -106,18 +106,18 @@ void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, grpc_resource_quota *resource_quota, const grpc_httpcli_request *request, const char *body_bytes, size_t body_size, - gpr_timespec deadline, grpc_closure *on_complete, + grpc_millis deadline, grpc_closure *on_complete, grpc_httpcli_response *response); /* override functions return 1 if they handled the request, 0 otherwise */ typedef int (*grpc_httpcli_get_override)(grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - gpr_timespec deadline, + grpc_millis deadline, grpc_closure *on_complete, grpc_httpcli_response *response); typedef int (*grpc_httpcli_post_override)( grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - const char *body_bytes, size_t body_size, gpr_timespec deadline, + const char *body_bytes, size_t body_size, grpc_millis deadline, grpc_closure *on_complete, grpc_httpcli_response *response); void grpc_httpcli_set_override(grpc_httpcli_get_override get, diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c index c553fa3981..355dfce2a6 100644 --- a/src/core/lib/http/httpcli_security_connector.c +++ b/src/core/lib/http/httpcli_security_connector.c @@ -156,7 +156,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, const char *host, - gpr_timespec deadline, + grpc_millis deadline, void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint)) { on_done_closure *c = gpr_malloc(sizeof(*c)); diff --git a/src/core/lib/support/block_annotate.h b/src/core/lib/iomgr/block_annotate.h index 8e3ef7df65..cbcb5d92f0 100644 --- a/src/core/lib/support/block_annotate.h +++ b/src/core/lib/iomgr/block_annotate.h @@ -16,8 +16,8 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H -#define GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H +#ifndef GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H +#define GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H #ifdef __cplusplus extern "C" { @@ -47,9 +47,13 @@ void gpr_thd_end_blocking_region(); #define GRPC_SCHEDULING_START_BLOCKING_REGION \ do { \ } while (0) -#define GRPC_SCHEDULING_END_BLOCKING_REGION \ - do { \ +#define GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX \ + do { \ + } while (0) +#define GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(ec) \ + do { \ + grpc_exec_ctx_invalidate_now((ec)); \ } while (0) #endif -#endif /* GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H */ +#endif /* GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H */ diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index 3ac12ab56f..1b5abdb277 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -25,6 +25,7 @@ #include <assert.h> #include <errno.h> +#include <limits.h> #include 
<poll.h> #include <pthread.h> #include <string.h> @@ -40,12 +41,12 @@ #include <grpc/support/useful.h> #include "src/core/lib/debug/stats.h" +#include "src/core/lib/iomgr/block_annotate.h" #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/lockfree_event.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" #include "src/core/lib/profiling/timers.h" -#include "src/core/lib/support/block_annotate.h" #include "src/core/lib/support/string.h" static grpc_wakeup_fd global_wakeup_fd; @@ -562,25 +563,17 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, GPR_TIMER_END("pollset_shutdown", 0); } -static int poll_deadline_to_millis_timeout(gpr_timespec deadline, - gpr_timespec now) { - gpr_timespec timeout; - if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) { - return -1; - } - - if (gpr_time_cmp(deadline, now) <= 0) { +static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, + grpc_millis millis) { + if (millis == GRPC_MILLIS_INF_FUTURE) return -1; + grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx); + if (delta > INT_MAX) { + return INT_MAX; + } else if (delta < 0) { return 0; + } else { + return (int)delta; } - - static const gpr_timespec round_up = { - 0, /* tv_sec */ - GPR_NS_PER_MS - 1, /* tv_nsec */ - GPR_TIMESPAN /* clock_type */ - }; - timeout = gpr_time_sub(deadline, now); - int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up)); - return millis >= 1 ? millis : 1; } /* Process the epoll events found by do_epoll_wait() function. @@ -637,11 +630,11 @@ static grpc_error *process_epoll_events(grpc_exec_ctx *exec_ctx, (i.e the designated poller thread) will be calling this function. So there is no need for any synchronization when accesing fields in g_epoll_set */ static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, - gpr_timespec now, gpr_timespec deadline) { + grpc_millis deadline) { GPR_TIMER_BEGIN("do_epoll_wait", 0); int r; - int timeout = poll_deadline_to_millis_timeout(deadline, now); + int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline); if (timeout != 0) { GRPC_SCHEDULING_START_BLOCKING_REGION; } @@ -651,7 +644,7 @@ static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, timeout); } while (r < 0 && errno == EINTR); if (timeout != 0) { - GRPC_SCHEDULING_END_BLOCKING_REGION; + GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx); } if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait"); @@ -669,9 +662,10 @@ static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, return GRPC_ERROR_NONE; } -static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, - grpc_pollset_worker **worker_hdl, gpr_timespec *now, - gpr_timespec deadline) { +static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + grpc_pollset_worker *worker, + grpc_pollset_worker **worker_hdl, + grpc_millis deadline) { GPR_TIMER_BEGIN("begin_worker", 0); if (worker_hdl != NULL) *worker_hdl = worker; worker->initialized_cv = false; @@ -756,14 +750,15 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, pollset->shutting_down); } - if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) && + if (gpr_cv_wait(&worker->cv, &pollset->mu, + grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME)) && worker->state == UNKICKED) { /* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker received a kick */ SET_KICK_STATE(worker, KICKED); } } - *now = 
gpr_now(now->clock_type); + grpc_exec_ctx_invalidate_now(exec_ctx); } if (GRPC_TRACER_ON(grpc_polling_trace)) { @@ -942,7 +937,7 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, ensure that it is held by the time the function returns */ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, grpc_pollset_worker **worker_hdl, - gpr_timespec now, gpr_timespec deadline) { + grpc_millis deadline) { grpc_pollset_worker worker; grpc_error *error = GRPC_ERROR_NONE; static const char *err_desc = "pollset_work"; @@ -953,7 +948,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, return GRPC_ERROR_NONE; } - if (begin_worker(ps, &worker, worker_hdl, &now, deadline)) { + if (begin_worker(exec_ctx, ps, &worker, worker_hdl, deadline)) { gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps); gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); GPR_ASSERT(!ps->shutting_down); @@ -976,8 +971,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, designated poller */ if (gpr_atm_acq_load(&g_epoll_set.cursor) == gpr_atm_acq_load(&g_epoll_set.num_events)) { - append_error(&error, do_epoll_wait(exec_ctx, ps, now, deadline), - err_desc); + append_error(&error, do_epoll_wait(exec_ctx, ps, deadline), err_desc); } append_error(&error, process_epoll_events(exec_ctx, ps), err_desc); diff --git a/src/core/lib/iomgr/ev_epollex_linux.c b/src/core/lib/iomgr/ev_epollex_linux.c index 8eb4de44d9..8aaad8808a 100644 --- a/src/core/lib/iomgr/ev_epollex_linux.c +++ b/src/core/lib/iomgr/ev_epollex_linux.c @@ -25,6 +25,7 @@ #include <assert.h> #include <errno.h> +#include <limits.h> #include <poll.h> #include <pthread.h> #include <string.h> @@ -38,7 +39,7 @@ #include <grpc/support/useful.h> #include "src/core/lib/debug/stats.h" -#include "src/core/lib/iomgr/ev_posix.h" +#include "src/core/lib/iomgr/block_annotate.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/is_epollexclusive_available.h" #include "src/core/lib/iomgr/lockfree_event.h" @@ -46,20 +47,18 @@ #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" #include "src/core/lib/profiling/timers.h" -#include "src/core/lib/support/block_annotate.h" #include "src/core/lib/support/spinlock.h" /******************************************************************************* * Polling object */ - typedef enum { PO_POLLING_GROUP, PO_POLLSET_SET, PO_POLLSET, - PO_FD, /* ordering is important: we always want to lock pollsets before fds: - this guarantees that using an fd as a pollable is safe */ - PO_EMPTY_POLLABLE, + PO_FD, + /* ordering is important: we always want to lock pollsets before fds: + this guarantees that using an fd as a pollable is safe */ PO_EMPTY_POLLABLE, PO_COUNT } polling_obj_type; @@ -690,32 +689,16 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { *mu = &pollset->pollable_obj.po.mu; } -/* Convert a timespec to milliseconds: - - Very small or negative poll times are clamped to zero to do a non-blocking - poll (which becomes spin polling) - - Other small values are rounded up to one millisecond - - Longer than a millisecond polls are rounded up to the next nearest - millisecond to avoid spinning - - Infinite timeouts are converted to -1 */ -static int poll_deadline_to_millis_timeout(gpr_timespec deadline, - gpr_timespec now) { - gpr_timespec timeout; - if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) { - return -1; - } - - if (gpr_time_cmp(deadline, now) <= 0) { 
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, + grpc_millis millis) { + if (millis == GRPC_MILLIS_INF_FUTURE) return -1; + grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx); + if (delta > INT_MAX) + return INT_MAX; + else if (delta < 0) return 0; - } - - static const gpr_timespec round_up = { - 0, /* tv_sec */ - GPR_NS_PER_MS - 1, /* tv_nsec */ - GPR_TIMESPAN /* clock_type */ - }; - timeout = gpr_time_sub(deadline, now); - int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up)); - return millis >= 1 ? millis : 1; + else + return (int)delta; } static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, @@ -810,9 +793,8 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { } static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - pollable *p, gpr_timespec now, - gpr_timespec deadline) { - int timeout = poll_deadline_to_millis_timeout(deadline, now); + pollable *p, grpc_millis deadline) { + int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline); if (GRPC_TRACER_ON(grpc_polling_trace)) { char *desc = pollable_desc(p); @@ -829,7 +811,7 @@ static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, r = epoll_wait(p->epfd, pollset->events, MAX_EPOLL_EVENTS, timeout); } while (r < 0 && errno == EINTR); if (timeout != 0) { - GRPC_SCHEDULING_END_BLOCKING_REGION; + GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx); } if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait"); @@ -884,9 +866,10 @@ static worker_remove_result worker_remove(grpc_pollset_worker **root, } /* Return true if this thread should poll */ -static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, - grpc_pollset_worker **worker_hdl, gpr_timespec *now, - gpr_timespec deadline) { +static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + grpc_pollset_worker *worker, + grpc_pollset_worker **worker_hdl, + grpc_millis deadline) { bool do_poll = true; if (worker_hdl != NULL) *worker_hdl = worker; worker->initialized_cv = false; @@ -910,10 +893,11 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, worker->pollable_obj->root_worker != worker) { gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset, worker->pollable_obj, worker, - poll_deadline_to_millis_timeout(deadline, *now)); + poll_deadline_to_millis_timeout(exec_ctx, deadline)); } while (do_poll && worker->pollable_obj->root_worker != worker) { - if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->po.mu, deadline)) { + if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->po.mu, + grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) { if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset, worker->pollable_obj, worker); @@ -936,7 +920,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_mu_lock(&pollset->pollable_obj.po.mu); gpr_mu_lock(&worker->pollable_obj->po.mu); } - *now = gpr_now(now->clock_type); + grpc_exec_ctx_invalidate_now(exec_ctx); } return do_poll && pollset->shutdown_closure == NULL && @@ -967,14 +951,13 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, ensure that it is held by the time the function returns */ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, - gpr_timespec now, gpr_timespec deadline) { + grpc_millis deadline) { grpc_pollset_worker worker; if (0 && GRPC_TRACER_ON(grpc_polling_trace)) 
{ - gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRId64 - ".%09d deadline=%" PRId64 ".%09d kwp=%d root_worker=%p", - pollset, worker_hdl, &worker, now.tv_sec, now.tv_nsec, - deadline.tv_sec, deadline.tv_nsec, pollset->kicked_without_poller, - pollset->root_worker); + gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRIdPTR + " deadline=%" PRIdPTR " kwp=%d root_worker=%p", + pollset, worker_hdl, &worker, grpc_exec_ctx_now(exec_ctx), deadline, + pollset->kicked_without_poller, pollset->root_worker); } grpc_error *error = GRPC_ERROR_NONE; static const char *err_desc = "pollset_work"; @@ -985,7 +968,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (pollset->current_pollable_obj != &pollset->pollable_obj) { gpr_mu_lock(&pollset->current_pollable_obj->po.mu); } - if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) { + if (begin_worker(exec_ctx, pollset, &worker, worker_hdl, deadline)) { gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset); gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); GPR_ASSERT(!pollset->shutdown_closure); @@ -996,7 +979,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, gpr_mu_unlock(&pollset->pollable_obj.po.mu); if (pollset->event_cursor == pollset->event_count) { append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable_obj, - now, deadline), + deadline), err_desc); } append_error(&error, pollset_process_events(exec_ctx, pollset, false), diff --git a/src/core/lib/iomgr/ev_epollsig_linux.c b/src/core/lib/iomgr/ev_epollsig_linux.c index 4d8bdf1401..ba2d448d6d 100644 --- a/src/core/lib/iomgr/ev_epollsig_linux.c +++ b/src/core/lib/iomgr/ev_epollsig_linux.c @@ -25,6 +25,7 @@ #include <assert.h> #include <errno.h> +#include <limits.h> #include <poll.h> #include <pthread.h> #include <signal.h> @@ -40,13 +41,13 @@ #include <grpc/support/useful.h> #include "src/core/lib/debug/stats.h" +#include "src/core/lib/iomgr/block_annotate.h" #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/lockfree_event.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" #include "src/core/lib/profiling/timers.h" -#include "src/core/lib/support/block_annotate.h" #define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1) @@ -1089,30 +1090,16 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { pollset->shutdown_done = NULL; } -/* Convert a timespec to milliseconds: - - Very small or negative poll times are clamped to zero to do a non-blocking - poll (which becomes spin polling) - - Other small values are rounded up to one millisecond - - Longer than a millisecond polls are rounded up to the next nearest - millisecond to avoid spinning - - Infinite timeouts are converted to -1 */ -static int poll_deadline_to_millis_timeout(gpr_timespec deadline, - gpr_timespec now) { - gpr_timespec timeout; - static const int64_t max_spin_polling_us = 10; - if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) { - return -1; - } - - if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros( - max_spin_polling_us, - GPR_TIMESPAN))) <= 0) { +static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, + grpc_millis millis) { + if (millis == GRPC_MILLIS_INF_FUTURE) return -1; + grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx); + if (delta > INT_MAX) + return INT_MAX; + else if (delta < 0) return 0; - } - timeout = gpr_time_sub(deadline, now); - 
int millis = gpr_time_to_millis(gpr_time_add( - timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN))); - return millis >= 1 ? millis : 1; + else + return (int)delta; } static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, @@ -1243,7 +1230,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask); - GRPC_SCHEDULING_END_BLOCKING_REGION; + GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx); if (ep_rv < 0) { if (errno != EINTR) { gpr_asprintf(&err_msg, @@ -1310,10 +1297,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, ensure that it is held by the time the function returns */ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, - gpr_timespec now, gpr_timespec deadline) { + grpc_millis deadline) { GPR_TIMER_BEGIN("pollset_work", 0); grpc_error *error = GRPC_ERROR_NONE; - int timeout_ms = poll_deadline_to_millis_timeout(deadline, now); + int timeout_ms = poll_deadline_to_millis_timeout(exec_ctx, deadline); sigset_t new_mask; diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c index e170702dca..036a35690c 100644 --- a/src/core/lib/iomgr/ev_poll_posix.c +++ b/src/core/lib/iomgr/ev_poll_posix.c @@ -24,6 +24,7 @@ #include <assert.h> #include <errno.h> +#include <limits.h> #include <poll.h> #include <string.h> #include <sys/socket.h> @@ -37,12 +38,11 @@ #include <grpc/support/useful.h> #include "src/core/lib/debug/stats.h" +#include "src/core/lib/iomgr/block_annotate.h" #include "src/core/lib/iomgr/iomgr_internal.h" -#include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/wakeup_fd_cv.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" #include "src/core/lib/profiling/timers.h" -#include "src/core/lib/support/block_annotate.h" #include "src/core/lib/support/murmur_hash.h" #define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1) @@ -50,7 +50,6 @@ /******************************************************************************* * FD declarations */ - typedef struct grpc_fd_watcher { struct grpc_fd_watcher *next; struct grpc_fd_watcher *prev; @@ -200,8 +199,8 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, - longer than a millisecond polls are rounded up to the next nearest millisecond to avoid spinning - infinite timeouts are converted to -1 */ -static int poll_deadline_to_millis_timeout(gpr_timespec deadline, - gpr_timespec now); +static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, + grpc_millis deadline); /* Allow kick to wakeup the currently polling worker */ #define GRPC_POLLSET_CAN_KICK_SELF 1 @@ -876,7 +875,7 @@ static void work_combine_error(grpc_error **composite, grpc_error *error) { static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, - gpr_timespec now, gpr_timespec deadline) { + grpc_millis deadline) { grpc_pollset_worker worker; if (worker_hdl) *worker_hdl = &worker; grpc_error *error = GRPC_ERROR_NONE; @@ -945,7 +944,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_fd_watcher *watchers; struct pollfd *pfds; - timeout = poll_deadline_to_millis_timeout(deadline, now); + timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline); if (pollset->fd_count + 2 <= inline_elements) { pfds = pollfd_space; @@ -991,7 +990,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, 
grpc_pollset *pollset, GRPC_SCHEDULING_START_BLOCKING_REGION; GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); r = grpc_poll_function(pfds, pfd_count, timeout); - GRPC_SCHEDULING_END_BLOCKING_REGION; + GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx); if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r); @@ -1068,13 +1067,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (queued_work || worker.kicked_specifically) { /* If there's queued work on the list, then set the deadline to be immediate so we get back out of the polling loop quickly */ - deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC); + deadline = 0; } keep_polling = 1; } - if (keep_polling) { - now = gpr_now(now.clock_type); - } } gpr_tls_set(&g_current_thread_poller, 0); if (added_worker) { @@ -1126,21 +1122,14 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } } -static int poll_deadline_to_millis_timeout(gpr_timespec deadline, - gpr_timespec now) { - gpr_timespec timeout; - static const int64_t max_spin_polling_us = 10; - if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) { - return -1; - } - if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros( - max_spin_polling_us, - GPR_TIMESPAN))) <= 0) { - return 0; - } - timeout = gpr_time_sub(deadline, now); - return gpr_time_to_millis(gpr_time_add( - timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN))); +static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, + grpc_millis deadline) { + if (deadline == GRPC_MILLIS_INF_FUTURE) return -1; + if (deadline == 0) return 0; + grpc_millis n = deadline - grpc_exec_ctx_now(exec_ctx); + if (n < 0) return 0; + if (n > INT_MAX) return -1; + return (int)n; } /******************************************************************************* diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c index 4d3ae2228e..e4033fab1d 100644 --- a/src/core/lib/iomgr/ev_posix.c +++ b/src/core/lib/iomgr/ev_posix.c @@ -205,9 +205,9 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { } grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker, gpr_timespec now, - gpr_timespec deadline) { - return g_event_engine->pollset_work(exec_ctx, pollset, worker, now, deadline); + grpc_pollset_worker **worker, + grpc_millis deadline) { + return g_event_engine->pollset_work(exec_ctx, pollset, worker, deadline); } grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h index 1ff2ff1413..df88ec458a 100644 --- a/src/core/lib/iomgr/ev_posix.h +++ b/src/core/lib/iomgr/ev_posix.h @@ -52,8 +52,8 @@ typedef struct grpc_event_engine_vtable { grpc_closure *closure); void (*pollset_destroy)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset); grpc_error *(*pollset_work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker, gpr_timespec now, - gpr_timespec deadline); + grpc_pollset_worker **worker, + grpc_millis deadline); grpc_error *(*pollset_kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *specific_worker); void (*pollset_add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c index 41c69add17..3d17afcb8f 100644 --- a/src/core/lib/iomgr/exec_ctx.c +++ b/src/core/lib/iomgr/exec_ctx.c @@ -104,9 +104,69 @@ static void 
exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_closure_list_append(&exec_ctx->closure_list, closure, error); } -void grpc_exec_ctx_global_init(void) {} +static gpr_timespec + g_start_time[GPR_TIMESPAN + 1]; // assumes GPR_TIMESPAN is the + // last enum value in + // gpr_clock_type + +void grpc_exec_ctx_global_init(void) { + for (int i = 0; i < GPR_TIMESPAN; i++) { + g_start_time[i] = gpr_now((gpr_clock_type)i); + } + // allows uniform treatment in conversion functions + g_start_time[GPR_TIMESPAN] = gpr_time_0(GPR_TIMESPAN); +} + void grpc_exec_ctx_global_shutdown(void) {} +static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) { + ts = gpr_time_sub(ts, g_start_time[ts.clock_type]); + double x = + GPR_MS_PER_SEC * (double)ts.tv_sec + (double)ts.tv_nsec / GPR_NS_PER_MS; + if (x < 0) return 0; + if (x > GPR_ATM_MAX) return GPR_ATM_MAX; + return (gpr_atm)x; +} + +static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) { + ts = gpr_time_sub(ts, g_start_time[ts.clock_type]); + double x = GPR_MS_PER_SEC * (double)ts.tv_sec + + (double)ts.tv_nsec / GPR_NS_PER_MS + + (double)(GPR_NS_PER_SEC - 1) / (double)GPR_NS_PER_SEC; + if (x < 0) return 0; + if (x > GPR_ATM_MAX) return GPR_ATM_MAX; + return (gpr_atm)x; +} + +grpc_millis grpc_exec_ctx_now(grpc_exec_ctx *exec_ctx) { + if (!exec_ctx->now_is_valid) { + exec_ctx->now = timespec_to_atm_round_down(gpr_now(GPR_CLOCK_MONOTONIC)); + exec_ctx->now_is_valid = true; + } + return exec_ctx->now; +} + +void grpc_exec_ctx_invalidate_now(grpc_exec_ctx *exec_ctx) { + exec_ctx->now_is_valid = false; +} + +gpr_timespec grpc_millis_to_timespec(grpc_millis millis, + gpr_clock_type clock_type) { + if (clock_type == GPR_TIMESPAN) { + return gpr_time_from_millis(millis, GPR_TIMESPAN); + } + return gpr_time_add(g_start_time[clock_type], + gpr_time_from_millis(millis, GPR_TIMESPAN)); +} + +grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec ts) { + return timespec_to_atm_round_down(ts); +} + +grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec ts) { + return timespec_to_atm_round_up(ts); +} + static const grpc_closure_scheduler_vtable exec_ctx_scheduler_vtable = { exec_ctx_run, exec_ctx_sched, "exec_ctx"}; static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable}; diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h index c89792c8c4..3caf775013 100644 --- a/src/core/lib/iomgr/exec_ctx.h +++ b/src/core/lib/iomgr/exec_ctx.h @@ -19,10 +19,15 @@ #ifndef GRPC_CORE_LIB_IOMGR_EXEC_CTX_H #define GRPC_CORE_LIB_IOMGR_EXEC_CTX_H +#include <grpc/support/atm.h> #include <grpc/support/cpu.h> + #include "src/core/lib/iomgr/closure.h" -/* #define GRPC_EXECUTION_CONTEXT_SANITIZER 1 */ +typedef gpr_atm grpc_millis; + +#define GRPC_MILLIS_INF_FUTURE GPR_ATM_MAX +#define GRPC_MILLIS_INF_PAST GPR_ATM_MIN /** A workqueue represents a list of work to be executed asynchronously. Forward declared here to avoid a circular dependency with workqueue.h. 
*/ @@ -66,6 +71,9 @@ struct grpc_exec_ctx { unsigned starting_cpu; void *check_ready_to_finish_arg; bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg); + + bool now_is_valid; + grpc_millis now; }; /* initializer for grpc_exec_ctx: @@ -73,7 +81,7 @@ struct grpc_exec_ctx { #define GRPC_EXEC_CTX_INITIALIZER(flags, finish_check, finish_check_arg) \ { \ GRPC_CLOSURE_LIST_INIT, NULL, NULL, flags, gpr_cpu_current_cpu(), \ - finish_check_arg, finish_check \ + finish_check_arg, finish_check, false, 0 \ } /* initialize an execution context at the top level of an API call into grpc @@ -106,4 +114,10 @@ void grpc_exec_ctx_global_init(void); void grpc_exec_ctx_global_init(void); void grpc_exec_ctx_global_shutdown(void); +grpc_millis grpc_exec_ctx_now(grpc_exec_ctx *exec_ctx); +void grpc_exec_ctx_invalidate_now(grpc_exec_ctx *exec_ctx); +gpr_timespec grpc_millis_to_timespec(grpc_millis millis, gpr_clock_type clock); +grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec timespec); +grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec timespec); + #endif /* GRPC_CORE_LIB_IOMGR_EXEC_CTX_H */ diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c index 2439f15a8a..cb4f506aed 100644 --- a/src/core/lib/iomgr/executor.c +++ b/src/core/lib/iomgr/executor.c @@ -184,6 +184,7 @@ static void executor_thread(void *arg) { gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state)); } + grpc_exec_ctx_invalidate_now(&exec_ctx); run_closures(&exec_ctx, &ts->local_elems); } grpc_exec_ctx_finish(&exec_ctx); diff --git a/src/core/lib/iomgr/iocp_windows.c b/src/core/lib/iomgr/iocp_windows.c index c082179c0b..3c38105ef9 100644 --- a/src/core/lib/iomgr/iocp_windows.c +++ b/src/core/lib/iomgr/iocp_windows.c @@ -40,25 +40,17 @@ static gpr_atm g_custom_events = 0; static HANDLE g_iocp; -static DWORD deadline_to_millis_timeout(gpr_timespec deadline, - gpr_timespec now) { +static DWORD deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, + grpc_millis deadline) { gpr_timespec timeout; - static const int64_t max_spin_polling_us = 10; - if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) { + if (deadline == GRPC_MILLIS_INF_FUTURE) { return INFINITE; } - if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros( - max_spin_polling_us, - GPR_TIMESPAN))) <= 0) { - return 0; - } - timeout = gpr_time_sub(deadline, now); - return (DWORD)gpr_time_to_millis(gpr_time_add( - timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN))); + return (DWORD)GPR_MAX(0, deadline - grpc_exec_ctx_now(exec_ctx)); } grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx, - gpr_timespec deadline) { + grpc_millis deadline) { BOOL success; DWORD bytes = 0; DWORD flags = 0; @@ -67,9 +59,9 @@ grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket; grpc_winsocket_callback_info *info; GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); - success = GetQueuedCompletionStatus( - g_iocp, &bytes, &completion_key, &overlapped, - deadline_to_millis_timeout(deadline, gpr_now(deadline.clock_type))); + success = + GetQueuedCompletionStatus(g_iocp, &bytes, &completion_key, &overlapped, + deadline_to_millis_timeout(exec_ctx, deadline)); if (success == 0 && overlapped == NULL) { return GRPC_IOCP_WORK_TIMEOUT; } diff --git a/src/core/lib/iomgr/iocp_windows.h b/src/core/lib/iomgr/iocp_windows.h index 9c89e868c5..4314ff2617 100644 --- a/src/core/lib/iomgr/iocp_windows.h +++ b/src/core/lib/iomgr/iocp_windows.h @@ -30,7 +30,7 @@ typedef enum { }
grpc_iocp_work_status; grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx, - gpr_timespec deadline); + grpc_millis deadline); void grpc_iocp_init(void); void grpc_iocp_kick(void); void grpc_iocp_flush(void); diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c index f63f190155..c74e4d61e9 100644 --- a/src/core/lib/iomgr/iomgr.c +++ b/src/core/lib/iomgr/iomgr.c @@ -48,7 +48,7 @@ void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) { gpr_cv_init(&g_rcv); grpc_exec_ctx_global_init(); grpc_executor_init(exec_ctx); - grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_list_init(exec_ctx); g_root_object.next = g_root_object.prev = &g_root_object; g_root_object.name = (char *)"root"; grpc_network_status_init(); @@ -95,8 +95,9 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) { } last_warning_time = gpr_now(GPR_CLOCK_REALTIME); } - if (grpc_timer_check(exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL) == - GRPC_TIMERS_FIRED) { + exec_ctx->now_is_valid = true; + exec_ctx->now = GRPC_MILLIS_INF_FUTURE; + if (grpc_timer_check(exec_ctx, NULL) == GRPC_TIMERS_FIRED) { gpr_mu_unlock(&g_mu); grpc_exec_ctx_flush(exec_ctx); grpc_iomgr_platform_flush(); diff --git a/src/core/lib/iomgr/load_file.c b/src/core/lib/iomgr/load_file.c index 0b4d41ea4b..5cb4099ea4 100644 --- a/src/core/lib/iomgr/load_file.c +++ b/src/core/lib/iomgr/load_file.c @@ -25,7 +25,7 @@ #include <grpc/support/log.h> #include <grpc/support/string_util.h> -#include "src/core/lib/support/block_annotate.h" +#include "src/core/lib/iomgr/block_annotate.h" #include "src/core/lib/support/string.h" grpc_error *grpc_load_file(const char *filename, int add_null_terminator, @@ -73,6 +73,6 @@ end: GRPC_ERROR_UNREF(error); error = error_out; } - GRPC_SCHEDULING_END_BLOCKING_REGION; + GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX; return error; } diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h index a0f6b3a9d3..8f8e37afaf 100644 --- a/src/core/lib/iomgr/pollset.h +++ b/src/core/lib/iomgr/pollset.h @@ -71,8 +71,8 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset); pollset lock */ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker, gpr_timespec now, - gpr_timespec deadline) GRPC_MUST_USE_RESULT; + grpc_pollset_worker **worker, + grpc_millis deadline) GRPC_MUST_USE_RESULT; /* Break one polling thread out of polling work for this pollset. If specific_worker is non-NULL, then kick that worker. 
*/ diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c index 2651325e25..0127770a0d 100644 --- a/src/core/lib/iomgr/pollset_uv.c +++ b/src/core/lib/iomgr/pollset_uv.c @@ -116,13 +116,14 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, - gpr_timespec now, gpr_timespec deadline) { + grpc_millis deadline) { uint64_t timeout; GRPC_UV_ASSERT_SAME_THREAD(); gpr_mu_unlock(&grpc_polling_mu); if (grpc_pollset_work_run_loop) { - if (gpr_time_cmp(deadline, now) >= 0) { - timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now)); + grpc_millis now = grpc_exec_ctx_now(exec_ctx); + if (deadline >= now) { + timeout = deadline - now; } else { timeout = 0; } diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c index eb295d3eeb..bb4df83fc1 100644 --- a/src/core/lib/iomgr/pollset_windows.c +++ b/src/core/lib/iomgr/pollset_windows.c @@ -110,7 +110,7 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {} grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, - gpr_timespec now, gpr_timespec deadline) { + grpc_millis deadline) { grpc_pollset_worker worker; if (worker_hdl) *worker_hdl = &worker; @@ -159,7 +159,8 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, &worker); added_worker = 1; while (!worker.kicked) { - if (gpr_cv_wait(&worker.cv, &grpc_polling_mu, deadline)) { + if (gpr_cv_wait(&worker.cv, &grpc_polling_mu, + grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) { break; } } diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c index 60cfeebd47..1b783495df 100644 --- a/src/core/lib/iomgr/resolve_address_posix.c +++ b/src/core/lib/iomgr/resolve_address_posix.c @@ -33,10 +33,10 @@ #include <grpc/support/thd.h> #include <grpc/support/time.h> #include <grpc/support/useful.h> +#include "src/core/lib/iomgr/block_annotate.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/unix_sockets_posix.h" -#include "src/core/lib/support/block_annotate.h" #include "src/core/lib/support/string.h" static grpc_error *blocking_resolve_address_impl( @@ -81,7 +81,7 @@ static grpc_error *blocking_resolve_address_impl( GRPC_SCHEDULING_START_BLOCKING_REGION; s = getaddrinfo(host, port, &hints, &result); - GRPC_SCHEDULING_END_BLOCKING_REGION; + GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX; if (s != 0) { /* Retry if well-known service name is recognized */ @@ -90,7 +90,7 @@ static grpc_error *blocking_resolve_address_impl( if (strcmp(port, svc[i][0]) == 0) { GRPC_SCHEDULING_START_BLOCKING_REGION; s = getaddrinfo(host, svc[i][1], &hints, &result); - GRPC_SCHEDULING_END_BLOCKING_REGION; + GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX; break; } } diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c index 0cb0029f4e..305930de44 100644 --- a/src/core/lib/iomgr/resolve_address_windows.c +++ b/src/core/lib/iomgr/resolve_address_windows.c @@ -33,10 +33,10 @@ #include <grpc/support/string_util.h> #include <grpc/support/thd.h> #include <grpc/support/time.h> +#include "src/core/lib/iomgr/block_annotate.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/sockaddr_utils.h" -#include 
"src/core/lib/support/block_annotate.h" #include "src/core/lib/support/string.h" typedef struct { @@ -86,7 +86,7 @@ static grpc_error *blocking_resolve_address_impl( GRPC_SCHEDULING_START_BLOCKING_REGION; s = getaddrinfo(host, port, &hints, &result); - GRPC_SCHEDULING_END_BLOCKING_REGION; + GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX; if (s != 0) { error = GRPC_WSA_ERROR(WSAGetLastError(), "getaddrinfo"); goto done; diff --git a/src/core/lib/iomgr/tcp_client.h b/src/core/lib/iomgr/tcp_client.h index 6c9e51ae84..c04fb8bc07 100644 --- a/src/core/lib/iomgr/tcp_client.h +++ b/src/core/lib/iomgr/tcp_client.h @@ -35,6 +35,6 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_connect, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *addr, - gpr_timespec deadline); + grpc_millis deadline); #endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_H */ diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c index 39dbb506e2..64834d88f5 100644 --- a/src/core/lib/iomgr/tcp_client_posix.c +++ b/src/core/lib/iomgr/tcp_client_posix.c @@ -48,7 +48,6 @@ extern grpc_tracer_flag grpc_tcp_trace; typedef struct { gpr_mu mu; grpc_fd *fd; - gpr_timespec deadline; grpc_timer alarm; grpc_closure on_alarm; int refs; @@ -244,7 +243,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *addr, - gpr_timespec deadline) { + grpc_millis deadline) { int fd; grpc_dualstack_mode dsmode; int err; @@ -325,9 +324,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, gpr_mu_lock(&ac->mu); GRPC_CLOSURE_INIT(&ac->on_alarm, tc_on_alarm, ac, grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &ac->alarm, - gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC), - &ac->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm); grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure); gpr_mu_unlock(&ac->mu); @@ -341,14 +338,14 @@ void (*grpc_tcp_client_connect_impl)( grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *addr, - gpr_timespec deadline) = tcp_client_connect_impl; + grpc_millis deadline) = tcp_client_connect_impl; void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *addr, - gpr_timespec deadline) { + grpc_millis deadline) { grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, channel_args, addr, deadline); } diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.c index fc62105cc9..bf8016506a 100644 --- a/src/core/lib/iomgr/tcp_client_windows.c +++ b/src/core/lib/iomgr/tcp_client_windows.c @@ -41,7 +41,6 @@ typedef struct { grpc_closure *on_done; gpr_mu mu; grpc_winsocket *socket; - gpr_timespec deadline; grpc_timer alarm; grpc_closure on_alarm; char *addr_name; @@ -124,7 +123,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { static void tcp_client_connect_impl( grpc_exec_ctx *exec_ctx, grpc_closure *on_done, grpc_endpoint **endpoint, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, - const grpc_resolved_address *addr, gpr_timespec deadline) { + const 
grpc_resolved_address *addr, grpc_millis deadline) { SOCKET sock = INVALID_SOCKET; BOOL success; int status; @@ -204,8 +203,7 @@ static void tcp_client_connect_impl( GRPC_CLOSURE_INIT(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&ac->on_alarm, on_alarm, ac, grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm, - gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm); grpc_socket_notify_on_write(exec_ctx, socket, &ac->on_connect); return; @@ -230,14 +228,14 @@ void (*grpc_tcp_client_connect_impl)( grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *addr, - gpr_timespec deadline) = tcp_client_connect_impl; + grpc_millis deadline) = tcp_client_connect_impl; void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *addr, - gpr_timespec deadline) { + grpc_millis deadline) { grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, channel_args, addr, deadline); } diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c index 7e271294fd..7fcaef7679 100644 --- a/src/core/lib/iomgr/tcp_posix.c +++ b/src/core/lib/iomgr/tcp_posix.c @@ -135,13 +135,11 @@ static void run_poller(grpc_exec_ctx *exec_ctx, void *bp, gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p); } gpr_mu_lock(p->pollset_mu); - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec deadline = - gpr_time_add(now, gpr_time_from_seconds(10, GPR_TIMESPAN)); + grpc_millis deadline = grpc_exec_ctx_now(exec_ctx) + 13 * GPR_MS_PER_SEC; GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx); - GRPC_LOG_IF_ERROR("backup_poller:pollset_work", - grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), NULL, - now, deadline)); + GRPC_LOG_IF_ERROR( + "backup_poller:pollset_work", + grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), NULL, deadline)); gpr_mu_unlock(p->pollset_mu); /* last "uncovered" notification is the ref that keeps us polling, if we get * there try a cas to release it */ diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h index ac392f87fe..e51cb44fbc 100644 --- a/src/core/lib/iomgr/timer.h +++ b/src/core/lib/iomgr/timer.h @@ -41,8 +41,7 @@ typedef struct grpc_timer grpc_timer; application callback is also responsible for maintaining information about when to free up any user-level state. */ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, - gpr_timespec deadline, grpc_closure *closure, - gpr_timespec now); + grpc_millis deadline, grpc_closure *closure); /* Initialize *timer without setting it. This can later be passed through the regular init or cancel */ @@ -92,8 +91,8 @@ typedef enum { with high probability at least one thread in the system will see an update at any time slice.
*/ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx, - gpr_timespec now, gpr_timespec *next); -void grpc_timer_list_init(gpr_timespec now); + grpc_millis *next); +void grpc_timer_list_init(grpc_exec_ctx *exec_ctx); void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx); /* Consume a kick issued by grpc_kick_poller */ diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c index e9a7236c8c..78765818ca 100644 --- a/src/core/lib/iomgr/timer_generic.c +++ b/src/core/lib/iomgr/timer_generic.c @@ -97,9 +97,6 @@ struct shared_mutables { static struct shared_mutables g_shared_mutables; -static gpr_clock_type g_clock_type; -static gpr_timespec g_start_time; - static gpr_atm saturating_add(gpr_atm a, gpr_atm b) { if (a > GPR_ATM_MAX - b) { return GPR_ATM_MAX; @@ -112,52 +109,19 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_atm *next, grpc_error *error); -static gpr_timespec dbl_to_ts(double d) { - gpr_timespec ts; - ts.tv_sec = (int64_t)d; - ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec)); - ts.clock_type = GPR_TIMESPAN; - return ts; -} - -static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) { - ts = gpr_time_sub(ts, g_start_time); - double x = GPR_MS_PER_SEC * (double)ts.tv_sec + - (double)ts.tv_nsec / GPR_NS_PER_MS + - (double)(GPR_NS_PER_SEC - 1) / (double)GPR_NS_PER_SEC; - if (x < 0) return 0; - if (x > GPR_ATM_MAX) return GPR_ATM_MAX; - return (gpr_atm)x; -} - -static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) { - ts = gpr_time_sub(ts, g_start_time); - double x = - GPR_MS_PER_SEC * (double)ts.tv_sec + (double)ts.tv_nsec / GPR_NS_PER_MS; - if (x < 0) return 0; - if (x > GPR_ATM_MAX) return GPR_ATM_MAX; - return (gpr_atm)x; -} - -static gpr_timespec atm_to_timespec(gpr_atm x) { - return gpr_time_add(g_start_time, dbl_to_ts((double)x / 1000.0)); -} - static gpr_atm compute_min_deadline(timer_shard *shard) { return grpc_timer_heap_is_empty(&shard->heap) ? 
saturating_add(shard->queue_deadline_cap, 1) : grpc_timer_heap_top(&shard->heap)->deadline; } -void grpc_timer_list_init(gpr_timespec now) { +void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) { uint32_t i; g_shared_mutables.initialized = true; g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER; gpr_mu_init(&g_shared_mutables.mu); - g_clock_type = now.clock_type; - g_start_time = now; - g_shared_mutables.min_timer = timespec_to_atm_round_down(now); + g_shared_mutables.min_timer = grpc_exec_ctx_now(exec_ctx); gpr_tls_init(&g_last_seen_min_timer); gpr_tls_set(&g_last_seen_min_timer, 0); grpc_register_tracer(&grpc_timer_trace); @@ -192,10 +156,6 @@ void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) { g_shared_mutables.initialized = false; } -static double ts_to_dbl(gpr_timespec ts) { - return (double)ts.tv_sec + 1e-9 * ts.tv_nsec; -} - /* returns true if the first element in the list */ static void list_join(grpc_timer *head, grpc_timer *timer) { timer->next = head; @@ -236,20 +196,16 @@ static void note_deadline_change(timer_shard *shard) { void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = false; } void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, - gpr_timespec deadline, grpc_closure *closure, - gpr_timespec now) { + grpc_millis deadline, grpc_closure *closure) { int is_first_timer = 0; timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)]; - GPR_ASSERT(deadline.clock_type == g_clock_type); - GPR_ASSERT(now.clock_type == g_clock_type); timer->closure = closure; - gpr_atm deadline_atm = timer->deadline = timespec_to_atm_round_up(deadline); + timer->deadline = deadline; if (GRPC_TRACER_ON(grpc_timer_trace)) { - gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR - "] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]", - timer, deadline.tv_sec, deadline.tv_nsec, deadline_atm, now.tv_sec, - now.tv_nsec, timespec_to_atm_round_down(now), closure, closure->cb); + gpr_log(GPR_DEBUG, + "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer, + deadline, grpc_exec_ctx_now(exec_ctx), closure, closure->cb); } if (!g_shared_mutables.initialized) { @@ -262,7 +218,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, gpr_mu_lock(&shard->mu); timer->pending = true; - if (gpr_time_cmp(deadline, now) <= 0) { + grpc_millis now = grpc_exec_ctx_now(exec_ctx); + if (deadline <= now) { timer->pending = false; GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE); gpr_mu_unlock(&shard->mu); @@ -271,8 +228,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, } grpc_time_averaged_stats_add_sample(&shard->stats, - ts_to_dbl(gpr_time_sub(deadline, now))); - if (deadline_atm < shard->queue_deadline_cap) { + (double)(deadline - now) / 1000.0); + if (deadline < shard->queue_deadline_cap) { is_first_timer = grpc_timer_heap_add(&shard->heap, timer); } else { timer->heap_index = INVALID_HEAP_INDEX; @@ -303,12 +260,12 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, gpr_log(GPR_DEBUG, " .. 
old shard min_deadline=%" PRIdPTR, shard->min_deadline); } - if (deadline_atm < shard->min_deadline) { + if (deadline < shard->min_deadline) { gpr_atm old_min_deadline = g_shard_queue[0]->min_deadline; - shard->min_deadline = deadline_atm; + shard->min_deadline = deadline; note_deadline_change(shard); - if (shard->shard_queue_index == 0 && deadline_atm < old_min_deadline) { - gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, deadline_atm); + if (shard->shard_queue_index == 0 && deadline < old_min_deadline) { + gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, deadline); grpc_kick_poller(); } } @@ -407,8 +364,9 @@ static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) { } if (timer->deadline > now) return NULL; if (GRPC_TRACER_ON(grpc_timer_trace)) { - gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late", timer, - now - timer->deadline); + gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late via %s scheduler", + timer, now - timer->deadline, + timer->closure->scheduler->vtable->name); } timer->pending = false; grpc_timer_heap_pop(&shard->heap); @@ -429,6 +387,10 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard, } *new_min_deadline = compute_min_deadline(shard); gpr_mu_unlock(&shard->mu); + if (GRPC_TRACER_ON(grpc_timer_check_trace)) { + gpr_log(GPR_DEBUG, " .. shard[%d] popped %" PRIdPTR, + (int)(shard - g_shards), n); + } return n; } @@ -501,29 +463,27 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx, } grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx, - gpr_timespec now, gpr_timespec *next) { + grpc_millis *next) { // prelude - GPR_ASSERT(now.clock_type == g_clock_type); - gpr_atm now_atm = timespec_to_atm_round_down(now); + grpc_millis now = grpc_exec_ctx_now(exec_ctx); /* fetch from a thread-local first: this avoids contention on a globally mutable cacheline in the common case */ - gpr_atm min_timer = gpr_tls_get(&g_last_seen_min_timer); - if (now_atm < min_timer) { + grpc_millis min_timer = gpr_tls_get(&g_last_seen_min_timer); + if (now < min_timer) { if (next != NULL) { - *next = - atm_to_timespec(GPR_MIN(timespec_to_atm_round_up(*next), min_timer)); + *next = GPR_MIN(*next, min_timer); } if (GRPC_TRACER_ON(grpc_timer_check_trace)) { gpr_log(GPR_DEBUG, - "TIMER CHECK SKIP: now_atm=%" PRIdPTR " min_timer=%" PRIdPTR, - now_atm, min_timer); + "TIMER CHECK SKIP: now=%" PRIdPTR " min_timer=%" PRIdPTR, now, + min_timer); } return GRPC_TIMERS_CHECKED_AND_EMPTY; } grpc_error *shutdown_error = - gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0 + now != GRPC_MILLIS_INF_FUTURE ? 
GRPC_ERROR_NONE : GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system"); @@ -533,34 +493,24 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx, if (next == NULL) { next_str = gpr_strdup("NULL"); } else { - gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec, - next->tv_nsec, timespec_to_atm_round_down(*next)); + gpr_asprintf(&next_str, "%" PRIdPTR, *next); } - gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRId64 ".%09d [%" PRIdPTR - "] next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR, - now.tv_sec, now.tv_nsec, now_atm, next_str, - gpr_tls_get(&g_last_seen_min_timer), + gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRIdPTR + " next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR, + now, next_str, gpr_tls_get(&g_last_seen_min_timer), gpr_atm_no_barrier_load(&g_shared_mutables.min_timer)); gpr_free(next_str); } // actual code - grpc_timer_check_result r; - gpr_atm next_atm; - if (next == NULL) { - r = run_some_expired_timers(exec_ctx, now_atm, NULL, shutdown_error); - } else { - next_atm = timespec_to_atm_round_down(*next); - r = run_some_expired_timers(exec_ctx, now_atm, &next_atm, shutdown_error); - *next = atm_to_timespec(next_atm); - } + grpc_timer_check_result r = + run_some_expired_timers(exec_ctx, now, next, shutdown_error); // tracing if (GRPC_TRACER_ON(grpc_timer_check_trace)) { char *next_str; if (next == NULL) { next_str = gpr_strdup("NULL"); } else { - gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec, - next->tv_nsec, next_atm); + gpr_asprintf(&next_str, "%" PRIdPTR, *next); } gpr_log(GPR_DEBUG, "TIMER CHECK END: r=%d; next=%s", r, next_str); gpr_free(next_str); diff --git a/src/core/lib/iomgr/timer_manager.c b/src/core/lib/iomgr/timer_manager.c index 04ca44563d..f5dbe247ad 100644 --- a/src/core/lib/iomgr/timer_manager.c +++ b/src/core/lib/iomgr/timer_manager.c @@ -52,7 +52,7 @@ static bool g_kicked; static bool g_has_timed_waiter; // the deadline of the current timed waiter thread (only relevant if // g_has_timed_waiter is true) -static gpr_timespec g_timed_waiter_deadline; +static grpc_millis g_timed_waiter_deadline; // generation counter to track which thread is waiting for the next timer static uint64_t g_timed_waiter_generation; @@ -96,9 +96,8 @@ static void start_timer_thread_and_unlock(void) { void grpc_timer_manager_tick() { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - gpr_timespec next = gpr_inf_future(GPR_CLOCK_MONOTONIC); - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - grpc_timer_check(&exec_ctx, now, &next); + grpc_millis next = GRPC_MILLIS_INF_FUTURE; + grpc_timer_check(&exec_ctx, &next); grpc_exec_ctx_finish(&exec_ctx); } @@ -121,6 +120,9 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) { gpr_mu_unlock(&g_mu); } // without our lock, flush the exec_ctx + if (GRPC_TRACER_ON(grpc_timer_check_trace)) { + gpr_log(GPR_DEBUG, "flush exec_ctx"); + } grpc_exec_ctx_flush(exec_ctx); gpr_mu_lock(&g_mu); // garbage collect any threads hanging out that are dead @@ -133,8 +135,7 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) { // wait until 'next' (or forever if there is already a timed waiter in the pool) // returns true if the thread should continue executing (false if it should // shutdown) -static bool wait_until(gpr_timespec next) { - const gpr_timespec inf_future = gpr_inf_future(GPR_CLOCK_MONOTONIC); +static bool wait_until(grpc_exec_ctx *exec_ctx, grpc_millis next) { gpr_mu_lock(&g_mu); // if we're not threaded anymore, leave if (!g_threaded) { @@ -168,30 +169,29 @@ static bool 
wait_until(gpr_timespec next) { unless their 'next' is earlier than the current timed-waiter's deadline (in which case the thread with earlier 'next' takes over as the new timed waiter) */ - if (gpr_time_cmp(next, inf_future) != 0) { - if (!g_has_timed_waiter || - (gpr_time_cmp(next, g_timed_waiter_deadline) < 0)) { + if (next != GRPC_MILLIS_INF_FUTURE) { + if (!g_has_timed_waiter || (next < g_timed_waiter_deadline)) { my_timed_waiter_generation = ++g_timed_waiter_generation; g_has_timed_waiter = true; g_timed_waiter_deadline = next; if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_timespec wait_time = - gpr_time_sub(next, gpr_now(GPR_CLOCK_MONOTONIC)); - gpr_log(GPR_DEBUG, "sleep for a %" PRId64 ".%09d seconds", - wait_time.tv_sec, wait_time.tv_nsec); + grpc_millis wait_time = next - grpc_exec_ctx_now(exec_ctx); + gpr_log(GPR_DEBUG, "sleep for a %" PRIdPTR " milliseconds", + wait_time); } } else { // g_timed_waiter == true && next >= g_timed_waiter_deadline - next = inf_future; + next = GRPC_MILLIS_INF_FUTURE; } } if (GRPC_TRACER_ON(grpc_timer_check_trace) && - gpr_time_cmp(next, inf_future) == 0) { + next == GRPC_MILLIS_INF_FUTURE) { gpr_log(GPR_DEBUG, "sleep until kicked"); } - gpr_cv_wait(&g_cv_wait, &g_mu, next); + gpr_cv_wait(&g_cv_wait, &g_mu, + grpc_millis_to_timespec(next, GPR_CLOCK_REALTIME)); if (GRPC_TRACER_ON(grpc_timer_check_trace)) { gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d", @@ -203,7 +203,7 @@ static bool wait_until(gpr_timespec next) { // there's work to do after checking timers (code above) if (my_timed_waiter_generation == g_timed_waiter_generation) { g_has_timed_waiter = false; - g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE; } } @@ -219,12 +219,11 @@ static bool wait_until(gpr_timespec next) { } static void timer_main_loop(grpc_exec_ctx *exec_ctx) { - const gpr_timespec inf_future = gpr_inf_future(GPR_CLOCK_MONOTONIC); for (;;) { - gpr_timespec next = inf_future; - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); + grpc_millis next = GRPC_MILLIS_INF_FUTURE; + grpc_exec_ctx_invalidate_now(exec_ctx); // check timer state, updates next to the next time to run a check - switch (grpc_timer_check(exec_ctx, now, &next)) { + switch (grpc_timer_check(exec_ctx, &next)) { case GRPC_TIMERS_FIRED: run_some_timers(exec_ctx); break; @@ -241,10 +240,10 @@ static void timer_main_loop(grpc_exec_ctx *exec_ctx) { if (GRPC_TRACER_ON(grpc_timer_check_trace)) { gpr_log(GPR_DEBUG, "timers not checked: expect another thread to"); } - next = inf_future; + next = GRPC_MILLIS_INF_FUTURE; /* fall through */ case GRPC_TIMERS_CHECKED_AND_EMPTY: - if (!wait_until(next)) { + if (!wait_until(exec_ctx, next)) { return; } break; @@ -300,7 +299,7 @@ void grpc_timer_manager_init(void) { g_completed_threads = NULL; g_has_timed_waiter = false; - g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE; start_threads(); } @@ -347,7 +346,7 @@ void grpc_kick_poller(void) { gpr_mu_lock(&g_mu); g_kicked = true; g_has_timed_waiter = false; - g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE; ++g_timed_waiter_generation; gpr_cv_signal(&g_cv_wait); gpr_mu_unlock(&g_mu); diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c index 691d66df69..0877818291 100644 --- 
a/src/core/lib/security/credentials/google_default/google_default_credentials.c +++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c @@ -96,7 +96,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) { /* The http call is local. If it takes more than one sec, it is for sure not on compute engine. */ - gpr_timespec max_detection_delay = gpr_time_from_seconds(1, GPR_TIMESPAN); + grpc_millis max_detection_delay = GPR_MS_PER_SEC; grpc_pollset *pollset = gpr_zalloc(grpc_pollset_size()); grpc_pollset_init(pollset, &g_polling_mu); @@ -115,7 +115,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) { grpc_resource_quota_create("google_default_credentials"); grpc_httpcli_get( exec_ctx, &context, &detector.pollent, resource_quota, &request, - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay), + grpc_exec_ctx_now(exec_ctx) + max_detection_delay, GRPC_CLOSURE_CREATE(on_compute_engine_detection_http_response, &detector, grpc_schedule_on_exec_ctx), &detector.response); @@ -132,8 +132,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) { "pollset_work", grpc_pollset_work(exec_ctx, grpc_polling_entity_pollset(&detector.pollent), - &worker, gpr_now(GPR_CLOCK_MONOTONIC), - gpr_inf_future(GPR_CLOCK_MONOTONIC)))) { + &worker, GRPC_MILLIS_INF_FUTURE))) { detector.is_done = 1; detector.success = 0; } diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.c index a27284bc50..7c24af86e8 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.c +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.c @@ -380,7 +380,7 @@ void verifier_cb_ctx_destroy(grpc_exec_ctx *exec_ctx, verifier_cb_ctx *ctx) { gpr_timespec grpc_jwt_verifier_clock_skew = {60, 0, GPR_TIMESPAN}; /* Max delay defaults to one minute. */ -gpr_timespec grpc_jwt_verifier_max_delay = {60, 0, GPR_TIMESPAN}; +grpc_millis grpc_jwt_verifier_max_delay = 60 * GPR_MS_PER_SEC; typedef struct { char *email_domain; @@ -707,7 +707,7 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data, grpc_resource_quota_create("jwt_verifier"); grpc_httpcli_get( exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req, - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay), + grpc_exec_ctx_now(exec_ctx) + grpc_jwt_verifier_max_delay, GRPC_CLOSURE_CREATE(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx), &ctx->responses[HTTP_RESPONSE_KEYS]); grpc_resource_quota_unref_internal(exec_ctx, resource_quota); @@ -833,10 +833,10 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx, extreme memory pressure. 
*/ grpc_resource_quota *resource_quota = grpc_resource_quota_create("jwt_verifier"); - grpc_httpcli_get( - exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req, - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay), - http_cb, &ctx->responses[rsp_idx]); + grpc_httpcli_get(exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, + resource_quota, &req, + grpc_exec_ctx_now(exec_ctx) + grpc_jwt_verifier_max_delay, + http_cb, &ctx->responses[rsp_idx]); grpc_resource_quota_unref_internal(exec_ctx, resource_quota); gpr_free(req.host); gpr_free(req.http.path); diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.h b/src/core/lib/security/credentials/jwt/jwt_verifier.h index 8fac452d4e..3a3261a780 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.h +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.h @@ -81,7 +81,7 @@ typedef struct { /* Globals to control the verifier. Not thread-safe. */ extern gpr_timespec grpc_jwt_verifier_clock_skew; -extern gpr_timespec grpc_jwt_verifier_max_delay; +extern grpc_millis grpc_jwt_verifier_max_delay; /* The verifier can be created with some custom mappings to help with key discovery in the case where the issuer is an email address. diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c index 10b270c49c..5fe1f71f47 100644 --- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c @@ -117,7 +117,7 @@ static void oauth2_token_fetcher_destruct(grpc_exec_ctx *exec_ctx, grpc_credentials_status grpc_oauth2_token_fetcher_credentials_parse_server_response( grpc_exec_ctx *exec_ctx, const grpc_http_response *response, - grpc_mdelem *token_md, gpr_timespec *token_lifetime) { + grpc_mdelem *token_md, grpc_millis *token_lifetime) { char *null_terminated_body = NULL; char *new_access_token = NULL; grpc_credentials_status status = GRPC_CREDENTIALS_OK; @@ -183,9 +183,7 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response( } gpr_asprintf(&new_access_token, "%s %s", token_type->value, access_token->value); - token_lifetime->tv_sec = strtol(expires_in->value, NULL, 10); - token_lifetime->tv_nsec = 0; - token_lifetime->clock_type = GPR_TIMESPAN; + *token_lifetime = strtol(expires_in->value, NULL, 10) * GPR_MS_PER_SEC; if (!GRPC_MDISNULL(*token_md)) GRPC_MDELEM_UNREF(exec_ctx, *token_md); *token_md = grpc_mdelem_from_slices( exec_ctx, @@ -214,7 +212,7 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx, grpc_oauth2_token_fetcher_credentials *c = (grpc_oauth2_token_fetcher_credentials *)r->creds; grpc_mdelem access_token_md = GRPC_MDNULL; - gpr_timespec token_lifetime; + grpc_millis token_lifetime; grpc_credentials_status status = grpc_oauth2_token_fetcher_credentials_parse_server_response( exec_ctx, &r->response, &access_token_md, &token_lifetime); @@ -222,10 +220,9 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx, gpr_mu_lock(&c->mu); c->token_fetch_pending = false; c->access_token_md = GRPC_MDELEM_REF(access_token_md); - c->token_expiration = - status == GRPC_CREDENTIALS_OK - ? gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), token_lifetime) - : gpr_inf_past(GPR_CLOCK_REALTIME); + c->token_expiration = status == GRPC_CREDENTIALS_OK + ? 
grpc_exec_ctx_now(exec_ctx) + token_lifetime : 0; grpc_oauth2_pending_get_request_metadata *pending_request = c->pending_requests; c->pending_requests = NULL; @@ -260,14 +257,12 @@ static bool oauth2_token_fetcher_get_request_metadata( grpc_oauth2_token_fetcher_credentials *c = (grpc_oauth2_token_fetcher_credentials *)creds; // Check if we can use the cached token. - gpr_timespec refresh_threshold = gpr_time_from_seconds( - GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN); + grpc_millis refresh_threshold = + GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS * GPR_MS_PER_SEC; grpc_mdelem cached_access_token_md = GRPC_MDNULL; gpr_mu_lock(&c->mu); if (!GRPC_MDISNULL(c->access_token_md) && - (gpr_time_cmp( - gpr_time_sub(c->token_expiration, gpr_now(GPR_CLOCK_REALTIME)), - refresh_threshold) > 0)) { + (c->token_expiration - grpc_exec_ctx_now(exec_ctx) > refresh_threshold)) { cached_access_token_md = GRPC_MDELEM_REF(c->access_token_md); } if (!GRPC_MDISNULL(cached_access_token_md)) { @@ -296,10 +291,10 @@ static bool oauth2_token_fetcher_get_request_metadata( gpr_mu_unlock(&c->mu); if (start_fetch) { grpc_call_credentials_ref(creds); - c->fetch_func( - exec_ctx, grpc_credentials_metadata_request_create(creds), - &c->httpcli_context, &c->pollent, on_oauth2_token_fetcher_http_response, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), refresh_threshold)); + c->fetch_func(exec_ctx, grpc_credentials_metadata_request_create(creds), + &c->httpcli_context, &c->pollent, + on_oauth2_token_fetcher_http_response, + grpc_exec_ctx_now(exec_ctx) + refresh_threshold); } return false; } @@ -340,7 +335,7 @@ static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials *c, c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2; gpr_ref_init(&c->base.refcount, 1); gpr_mu_init(&c->mu); - c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME); + c->token_expiration = 0; c->fetch_func = fetch_func; c->pollent = grpc_polling_entity_create_from_pollset_set(grpc_pollset_set_create()); @@ -358,7 +353,7 @@ static grpc_call_credentials_vtable compute_engine_vtable = { static void compute_engine_fetch_oauth2( grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req, grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent, - grpc_iomgr_cb_func response_cb, gpr_timespec deadline) { + grpc_iomgr_cb_func response_cb, grpc_millis deadline) { grpc_http_header header = {"Metadata-Flavor", "Google"}; grpc_httpcli_request request; memset(&request, 0, sizeof(grpc_httpcli_request)); @@ -409,7 +404,7 @@ static grpc_call_credentials_vtable refresh_token_vtable = { static void refresh_token_fetch_oauth2( grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req, grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent, - grpc_iomgr_cb_func response_cb, gpr_timespec deadline) { + grpc_iomgr_cb_func response_cb, grpc_millis deadline) { grpc_google_refresh_token_credentials *c = (grpc_google_refresh_token_credentials *)metadata_req->creds; grpc_http_header header = {"Content-Type", diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h index d9ad6691b8..5d9f24307f 100644 --- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h @@ -57,7 +57,7 @@ typedef void (*grpc_fetch_oauth2_func)(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *http_context, grpc_polling_entity *pollent, grpc_iomgr_cb_func cb, - gpr_timespec deadline); + 
grpc_millis deadline); typedef struct grpc_oauth2_pending_get_request_metadata { grpc_credentials_mdelem_array *md_array; @@ -70,7 +70,7 @@ typedef struct { grpc_call_credentials base; gpr_mu mu; grpc_mdelem access_token_md; - gpr_timespec token_expiration; + grpc_millis token_expiration; bool token_fetch_pending; grpc_oauth2_pending_get_request_metadata *pending_requests; grpc_httpcli_context httpcli_context; @@ -100,6 +100,6 @@ grpc_refresh_token_credentials_create_from_auth_refresh_token( grpc_credentials_status grpc_oauth2_token_fetcher_credentials_parse_server_response( grpc_exec_ctx *exec_ctx, const struct grpc_http_response *response, - grpc_mdelem *token_md, gpr_timespec *token_lifetime); + grpc_mdelem *token_md, grpc_millis *token_lifetime); #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_OAUTH2_OAUTH2_CREDENTIALS_H */ diff --git a/src/core/lib/support/time_posix.c b/src/core/lib/support/time_posix.c index 3ead40d807..753a629e6e 100644 --- a/src/core/lib/support/time_posix.c +++ b/src/core/lib/support/time_posix.c @@ -30,7 +30,6 @@ #include <grpc/support/atm.h> #include <grpc/support/log.h> #include <grpc/support/time.h> -#include "src/core/lib/support/block_annotate.h" static struct timespec timespec_from_gpr(gpr_timespec gts) { struct timespec rv; @@ -157,9 +156,7 @@ void gpr_sleep_until(gpr_timespec until) { delta = gpr_time_sub(until, now); delta_ts = timespec_from_gpr(delta); - GRPC_SCHEDULING_START_BLOCKING_REGION; ns_result = nanosleep(&delta_ts, NULL); - GRPC_SCHEDULING_END_BLOCKING_REGION; if (ns_result == 0) { break; } diff --git a/src/core/lib/support/time_windows.c b/src/core/lib/support/time_windows.c index 40df3761c0..fb17e5c079 100644 --- a/src/core/lib/support/time_windows.c +++ b/src/core/lib/support/time_windows.c @@ -28,7 +28,6 @@ #include <process.h> #include <sys/timeb.h> -#include "src/core/lib/support/block_annotate.h" #include "src/core/lib/support/time_precise.h" static LARGE_INTEGER g_start_time; @@ -92,9 +91,7 @@ void gpr_sleep_until(gpr_timespec until) { sleep_millis = delta.tv_sec * GPR_MS_PER_SEC + delta.tv_nsec / GPR_NS_PER_MS; GPR_ASSERT((sleep_millis >= 0) && (sleep_millis <= INT_MAX)); - GRPC_SCHEDULING_START_BLOCKING_REGION; Sleep((DWORD)sleep_millis); - GRPC_SCHEDULING_END_BLOCKING_REGION; } } diff --git a/src/core/lib/surface/alarm.c b/src/core/lib/surface/alarm.c index 7712f560b9..e3bcbb9d80 100644 --- a/src/core/lib/surface/alarm.c +++ b/src/core/lib/surface/alarm.c @@ -122,8 +122,7 @@ void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq, GPR_ASSERT(grpc_cq_begin_op(cq, tag)); grpc_timer_init(&exec_ctx, &alarm->alarm, - gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC), - &alarm->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timespec_to_millis_round_up(deadline), &alarm->on_alarm); grpc_exec_ctx_finish(&exec_ctx); } diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c index 03f47553a1..2871dea6ea 100644 --- a/src/core/lib/surface/call.c +++ b/src/core/lib/surface/call.c @@ -214,7 +214,7 @@ struct grpc_call { server, it's trailing metadata */ grpc_linked_mdelem send_extra_metadata[MAX_SEND_EXTRA_METADATA_COUNT]; int send_extra_metadata_count; - gpr_timespec send_deadline; + grpc_millis send_deadline; grpc_slice_buffer_stream sending_stream; @@ -281,7 +281,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack, grpc_error *error); static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp, grpc_error *error); -static void get_final_status(grpc_call *call, +static void 
get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call, void (*set_value)(grpc_status_code code, void *user_data), void *set_value_user_data, grpc_slice *details); @@ -370,11 +370,10 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, } for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { - call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + call->metadata_batch[i][j].deadline = GRPC_MILLIS_INF_FUTURE; } } - gpr_timespec send_deadline = - gpr_convert_clock_type(args->send_deadline, GPR_CLOCK_MONOTONIC); + grpc_millis send_deadline = args->send_deadline; bool immediately_cancel = false; @@ -392,10 +391,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, gpr_mu_lock(&pc->child_list_mu); if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) { - send_deadline = gpr_time_min( - gpr_convert_clock_type(send_deadline, - args->parent->send_deadline.clock_type), - args->parent->send_deadline); + send_deadline = GPR_MIN(send_deadline, args->parent->send_deadline); } /* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with * GRPC_PROPAGATE_STATS_CONTEXT */ @@ -550,8 +546,8 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, GRPC_CQ_INTERNAL_UNREF(exec_ctx, c->cq, "bind"); } - get_final_status(c, set_status_value_directly, &c->final_info.final_status, - NULL); + get_final_status(exec_ctx, c, set_status_value_directly, + &c->final_info.final_status, NULL); c->final_info.stats.latency = gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time); @@ -737,13 +733,16 @@ static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c, * FINAL STATUS CODE MANIPULATION */ -static bool get_final_status_from( - grpc_call *call, grpc_error *error, bool allow_ok_status, - void (*set_value)(grpc_status_code code, void *user_data), - void *set_value_user_data, grpc_slice *details) { +static bool get_final_status_from(grpc_exec_ctx *exec_ctx, grpc_call *call, + grpc_error *error, bool allow_ok_status, + void (*set_value)(grpc_status_code code, + void *user_data), + void *set_value_user_data, + grpc_slice *details) { grpc_status_code code; grpc_slice slice = grpc_empty_slice(); - grpc_error_get_status(error, call->send_deadline, &code, &slice, NULL); + grpc_error_get_status(exec_ctx, error, call->send_deadline, &code, &slice, + NULL); if (code == GRPC_STATUS_OK && !allow_ok_status) { return false; } @@ -755,7 +754,7 @@ static bool get_final_status_from( return true; } -static void get_final_status(grpc_call *call, +static void get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call, void (*set_value)(grpc_status_code code, void *user_data), void *set_value_user_data, grpc_slice *details) { @@ -780,8 +779,9 @@ static void get_final_status(grpc_call *call, for (i = 0; i < STATUS_SOURCE_COUNT; i++) { if (status[i].is_set && grpc_error_has_clear_grpc_status(status[i].error)) { - if (get_final_status_from(call, status[i].error, allow_ok_status != 0, - set_value, set_value_user_data, details)) { + if (get_final_status_from(exec_ctx, call, status[i].error, + allow_ok_status != 0, set_value, + set_value_user_data, details)) { return; } } @@ -789,8 +789,9 @@ static void get_final_status(grpc_call *call, /* If no clearly defined status exists, search for 'anything' */ for (i = 0; i < STATUS_SOURCE_COUNT; i++) { if (status[i].is_set) { - if (get_final_status_from(call, status[i].error, allow_ok_status != 0, - set_value, set_value_user_data, details)) { + if (get_final_status_from(exec_ctx, call, status[i].error, + allow_ok_status != 0, set_value, + 
set_value_user_data, details)) { return; } } @@ -1331,17 +1332,22 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, } if (call->is_client) { - get_final_status(call, set_status_value_directly, + get_final_status(exec_ctx, call, set_status_value_directly, call->final_op.client.status, call->final_op.client.status_details); } else { - get_final_status(call, set_cancelled_value, + get_final_status(exec_ctx, call, set_cancelled_value, call->final_op.server.cancelled, NULL); } GRPC_ERROR_UNREF(error); error = GRPC_ERROR_NONE; } + if (error != GRPC_ERROR_NONE && bctl->op.recv_message && + *call->receiving_buffer != NULL) { + grpc_byte_buffer_destroy(*call->receiving_buffer); + *call->receiving_buffer = NULL; + } if (bctl->completion_data.notify_tag.is_closure) { /* unrefs bctl->error */ @@ -1612,11 +1618,8 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx, validate_filtered_metadata(exec_ctx, bctl); GPR_TIMER_END("validate_filtered_metadata", 0); - if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) != - 0 && - !call->is_client) { - call->send_deadline = - gpr_convert_clock_type(md->deadline, GPR_CLOCK_MONOTONIC); + if (md->deadline != GRPC_MILLIS_INF_FUTURE && !call->is_client) { + call->send_deadline = md->deadline; } } diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h index c680139cf6..27c2f5243c 100644 --- a/src/core/lib/surface/call.h +++ b/src/core/lib/surface/call.h @@ -49,7 +49,7 @@ typedef struct grpc_call_create_args { grpc_mdelem *add_initial_metadata; size_t add_initial_metadata_count; - gpr_timespec send_deadline; + grpc_millis send_deadline; } grpc_call_create_args; /* Create a new call based on \a args. diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c index 48962e5e45..0b5ca17cc4 100644 --- a/src/core/lib/surface/channel.c +++ b/src/core/lib/surface/channel.c @@ -265,7 +265,7 @@ static grpc_call *grpc_channel_create_call_internal( grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *cq, grpc_pollset_set *pollset_set_alternative, grpc_mdelem path_mdelem, - grpc_mdelem authority_mdelem, gpr_timespec deadline) { + grpc_mdelem authority_mdelem, grpc_millis deadline) { grpc_mdelem send_metadata[2]; size_t num_metadata = 0; @@ -311,7 +311,7 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel, host != NULL ? 
grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_AUTHORITY, grpc_slice_ref_internal(*host)) : GRPC_MDNULL, - deadline); + grpc_timespec_to_millis_round_up(deadline)); grpc_exec_ctx_finish(&exec_ctx); return call; } @@ -319,7 +319,7 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel, grpc_call *grpc_channel_create_pollset_set_call( grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method, - const grpc_slice *host, gpr_timespec deadline, void *reserved) { + const grpc_slice *host, grpc_millis deadline, void *reserved) { GPR_ASSERT(!reserved); return grpc_channel_create_call_internal( exec_ctx, channel, parent_call, propagation_mask, NULL, pollset_set, @@ -375,7 +375,8 @@ grpc_call *grpc_channel_create_registered_call( grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_call *call = grpc_channel_create_call_internal( &exec_ctx, channel, parent_call, propagation_mask, completion_queue, NULL, - GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority), deadline); + GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority), + grpc_timespec_to_millis_round_up(deadline)); grpc_exec_ctx_finish(&exec_ctx); return call; } diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h index 528bb868e2..827dd992b5 100644 --- a/src/core/lib/surface/channel.h +++ b/src/core/lib/surface/channel.h @@ -43,7 +43,7 @@ grpc_channel *grpc_channel_create_with_builder( grpc_call *grpc_channel_create_pollset_set_call( grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method, - const grpc_slice *host, gpr_timespec deadline, void *reserved); + const grpc_slice *host, grpc_millis deadline, void *reserved); /** Get a (borrowed) pointer to this channels underlying channel stack */ grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel); diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c index fed66e3a20..a05ac02b89 100644 --- a/src/core/lib/surface/completion_queue.c +++ b/src/core/lib/surface/completion_queue.c @@ -58,8 +58,7 @@ typedef struct { grpc_error *(*kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *specific_worker); grpc_error *(*work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker, gpr_timespec now, - gpr_timespec deadline); + grpc_pollset_worker **worker, grpc_millis deadline); void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_closure *closure); void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset); @@ -97,8 +96,7 @@ static void non_polling_poller_destroy(grpc_exec_ctx *exec_ctx, static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker **worker, - gpr_timespec now, - gpr_timespec deadline) { + grpc_millis deadline) { non_polling_poller *npp = (non_polling_poller *)pollset; if (npp->shutdown) return GRPC_ERROR_NONE; non_polling_worker w; @@ -112,7 +110,10 @@ static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx, w.next->prev = w.prev->next = &w; } w.kicked = false; - while (!npp->shutdown && !w.kicked && !gpr_cv_wait(&w.cv, &npp->mu, deadline)) + gpr_timespec deadline_ts = + grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME); + while (!npp->shutdown && !w.kicked && + !gpr_cv_wait(&w.cv, &npp->mu, deadline_ts)) ; if (&w == npp->root) { npp->root = w.next; @@ -757,7 +758,7 @@ void 
grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq, typedef struct { gpr_atm last_seen_things_queued_ever; grpc_completion_queue *cq; - gpr_timespec deadline; + grpc_millis deadline; grpc_cq_completion *stolen_completion; void *tag; /* for pluck */ bool first_loop; @@ -786,8 +787,7 @@ static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) { return true; } } - return !a->first_loop && - gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0; + return !a->first_loop && a->deadline < grpc_exec_ctx_now(exec_ctx); } #ifndef NDEBUG @@ -816,7 +816,6 @@ static void dump_pending_tags(grpc_completion_queue *cq) {} static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, void *reserved) { grpc_event ret; - gpr_timespec now; cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq); GPR_TIMER_BEGIN("grpc_completion_queue_next", 0); @@ -833,23 +832,21 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, dump_pending_tags(cq); - deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); - GRPC_CQ_INTERNAL_REF(cq, "next"); + grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline); cq_is_finished_arg is_finished_arg = { .last_seen_things_queued_ever = gpr_atm_no_barrier_load(&cqd->things_queued_ever), .cq = cq, - .deadline = deadline, + .deadline = deadline_millis, .stolen_completion = NULL, .tag = NULL, .first_loop = true}; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INITIALIZER(0, cq_is_next_finished, &is_finished_arg); - for (;;) { - gpr_timespec iteration_deadline = deadline; + grpc_millis iteration_deadline = deadline_millis; if (is_finished_arg.stolen_completion != NULL) { grpc_cq_completion *c = is_finished_arg.stolen_completion; @@ -876,7 +873,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, attempt at popping. 
Not doing this can potentially deadlock this thread forever (if the deadline is infinity) */ if (cq_event_queue_num_items(&cqd->queue) > 0) { - iteration_deadline = gpr_time_0(GPR_CLOCK_MONOTONIC); + iteration_deadline = 0; } } @@ -897,8 +894,8 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, break; } - now = gpr_now(GPR_CLOCK_MONOTONIC); - if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) { + if (!is_finished_arg.first_loop && + grpc_exec_ctx_now(&exec_ctx) >= deadline_millis) { memset(&ret, 0, sizeof(ret)); ret.type = GRPC_QUEUE_TIMEOUT; dump_pending_tags(cq); @@ -909,7 +906,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, gpr_mu_lock(cq->mu); cq->num_polls++; grpc_error *err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq), - NULL, now, iteration_deadline); + NULL, iteration_deadline); gpr_mu_unlock(cq->mu); if (err != GRPC_ERROR_NONE) { @@ -1046,8 +1043,7 @@ static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) { } gpr_mu_unlock(cq->mu); } - return !a->first_loop && - gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0; + return !a->first_loop && a->deadline < grpc_exec_ctx_now(exec_ctx); } static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag, @@ -1056,7 +1052,6 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag, grpc_cq_completion *c; grpc_cq_completion *prev; grpc_pollset_worker *worker = NULL; - gpr_timespec now; cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq); GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0); @@ -1075,15 +1070,14 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag, dump_pending_tags(cq); - deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); - GRPC_CQ_INTERNAL_REF(cq, "pluck"); gpr_mu_lock(cq->mu); + grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline); cq_is_finished_arg is_finished_arg = { .last_seen_things_queued_ever = gpr_atm_no_barrier_load(&cqd->things_queued_ever), .cq = cq, - .deadline = deadline, + .deadline = deadline_millis, .stolen_completion = NULL, .tag = tag, .first_loop = true}; @@ -1135,8 +1129,8 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag, dump_pending_tags(cq); break; } - now = gpr_now(GPR_CLOCK_MONOTONIC); - if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) { + if (!is_finished_arg.first_loop && + grpc_exec_ctx_now(&exec_ctx) >= deadline_millis) { del_plucker(cq, tag, &worker); gpr_mu_unlock(cq->mu); memset(&ret, 0, sizeof(ret)); @@ -1144,10 +1138,9 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag, dump_pending_tags(cq); break; } - cq->num_polls++; grpc_error *err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq), - &worker, now, deadline); + &worker, deadline_millis); if (err != GRPC_ERROR_NONE) { del_plucker(cq, tag, &worker); gpr_mu_unlock(cq->mu); diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc index 6286f9159d..88e26cbeb7 100644 --- a/src/core/lib/surface/lame_client.cc +++ b/src/core/lib/surface/lame_client.cc @@ -74,7 +74,7 @@ static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, mdb->list.head = &calld->status; mdb->list.tail = &calld->details; mdb->list.count = 2; - mdb->deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + mdb->deadline = GRPC_MILLIS_INF_FUTURE; } static void lame_start_transport_stream_op_batch( diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c index 1d0fd472d0..dd09cb91de 
100644 --- a/src/core/lib/surface/server.c +++ b/src/core/lib/surface/server.c @@ -137,7 +137,7 @@ struct call_data { bool host_set; grpc_slice path; grpc_slice host; - gpr_timespec deadline; + grpc_millis deadline; grpc_completion_queue *cq_new; @@ -492,11 +492,13 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server, GPR_ASSERT(calld->path_set); rc->data.batch.details->host = grpc_slice_ref_internal(calld->host); rc->data.batch.details->method = grpc_slice_ref_internal(calld->path); - rc->data.batch.details->deadline = calld->deadline; + rc->data.batch.details->deadline = + grpc_millis_to_timespec(calld->deadline, GPR_CLOCK_MONOTONIC); rc->data.batch.details->flags = calld->recv_initial_metadata_flags; break; case REGISTERED_CALL: - *rc->data.registered.deadline = calld->deadline; + *rc->data.registered.deadline = + grpc_millis_to_timespec(calld->deadline, GPR_CLOCK_MONOTONIC); if (rc->data.registered.optional_payload) { *rc->data.registered.optional_payload = calld->payload; calld->payload = NULL; @@ -739,7 +741,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr, grpc_error *error) { grpc_call_element *elem = (grpc_call_element *)ptr; call_data *calld = (call_data *)elem->call_data; - gpr_timespec op_deadline; + grpc_millis op_deadline; if (error == GRPC_ERROR_NONE) { GPR_ASSERT(calld->recv_initial_metadata->idx.named.path != NULL); @@ -759,7 +761,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr, GRPC_ERROR_REF(error); } op_deadline = calld->recv_initial_metadata->deadline; - if (0 != gpr_time_cmp(op_deadline, gpr_inf_future(op_deadline.clock_type))) { + if (op_deadline != GRPC_MILLIS_INF_FUTURE) { calld->deadline = op_deadline; } if (calld->host_set && calld->path_set) { @@ -833,7 +835,7 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd, memset(&args, 0, sizeof(args)); args.channel = chand->channel; args.server_transport_data = transport_server_data; - args.send_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + args.send_deadline = GRPC_MILLIS_INF_FUTURE; grpc_call *call; grpc_error *error = grpc_call_create(exec_ctx, &args, &call); grpc_call_element *elem = @@ -881,7 +883,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, call_data *calld = (call_data *)elem->call_data; channel_data *chand = (channel_data *)elem->channel_data; memset(calld, 0, sizeof(call_data)); - calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + calld->deadline = GRPC_MILLIS_INF_FUTURE; calld->call = grpc_call_from_top_element(elem); gpr_mu_init(&calld->mu_state); diff --git a/src/core/lib/transport/bdp_estimator.c b/src/core/lib/transport/bdp_estimator.c index 8b57693413..2a3ffbbd01 100644 --- a/src/core/lib/transport/bdp_estimator.c +++ b/src/core/lib/transport/bdp_estimator.c @@ -29,8 +29,11 @@ grpc_tracer_flag grpc_bdp_estimator_trace = void grpc_bdp_estimator_init(grpc_bdp_estimator *estimator, const char *name) { estimator->estimate = 65536; estimator->ping_state = GRPC_BDP_PING_UNSCHEDULED; + estimator->ping_start_time = gpr_time_0(GPR_CLOCK_MONOTONIC); estimator->name = name; estimator->bw_est = 0; + estimator->inter_ping_delay = 100.0; // start at 100ms + estimator->stable_estimate_count = 0; } bool grpc_bdp_estimator_get_estimate(const grpc_bdp_estimator *estimator, @@ -53,7 +56,8 @@ void grpc_bdp_estimator_add_incoming_bytes(grpc_bdp_estimator *estimator, bool grpc_bdp_estimator_need_ping(const grpc_bdp_estimator *estimator) { switch (estimator->ping_state) { case 
GRPC_BDP_PING_UNSCHEDULED: - return true; + return gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), + estimator->ping_start_time) >= 0; case GRPC_BDP_PING_SCHEDULED: return false; case GRPC_BDP_PING_STARTED: @@ -84,10 +88,11 @@ void grpc_bdp_estimator_start_ping(grpc_bdp_estimator *estimator) { } void grpc_bdp_estimator_complete_ping(grpc_bdp_estimator *estimator) { - gpr_timespec dt_ts = - gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), estimator->ping_start_time); + gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); + gpr_timespec dt_ts = gpr_time_sub(now, estimator->ping_start_time); double dt = (double)dt_ts.tv_sec + 1e-9 * (double)dt_ts.tv_nsec; double bw = dt > 0 ? ((double)estimator->accumulator / dt) : 0; + int start_inter_ping_delay = estimator->inter_ping_delay; if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { gpr_log(GPR_DEBUG, "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64 " dt=%lf bw=%lfMbs bw_est=%lfMbs", @@ -104,7 +109,26 @@ void grpc_bdp_estimator_complete_ping(grpc_bdp_estimator *estimator) { gpr_log(GPR_DEBUG, "bdp[%s]: estimate increased to %" PRId64, estimator->name, estimator->estimate); } + estimator->inter_ping_delay /= 2; // if the ping estimate changes, + // exponentially get faster at probing + } else if (estimator->inter_ping_delay < 10000) { + estimator->stable_estimate_count++; + if (estimator->stable_estimate_count >= 2) { + estimator->inter_ping_delay += + 100 + + (int)(rand() * 100.0 / RAND_MAX); // if the ping estimate is steady, + // slowly ramp down the probe time + } + } + if (start_inter_ping_delay != estimator->inter_ping_delay) { + estimator->stable_estimate_count = 0; + if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { + gpr_log(GPR_DEBUG, "bdp[%s]:update_inter_time to %dms", estimator->name, + estimator->inter_ping_delay); + } } estimator->ping_state = GRPC_BDP_PING_UNSCHEDULED; estimator->accumulator = 0; + estimator->ping_start_time = gpr_time_add( + now, gpr_time_from_millis(estimator->inter_ping_delay, GPR_TIMESPAN)); } diff --git a/src/core/lib/transport/bdp_estimator.h b/src/core/lib/transport/bdp_estimator.h index 1ef0dc99dd..670e38dd4b 100644 --- a/src/core/lib/transport/bdp_estimator.h +++ b/src/core/lib/transport/bdp_estimator.h @@ -39,7 +39,12 @@ typedef struct grpc_bdp_estimator { grpc_bdp_estimator_ping_state ping_state; int64_t accumulator; int64_t estimate; + // case ping_state of + // GRPC_BDP_PING_UNSCHEDULED => when to start the next ping + // GRPC_BDP_PING_STARTED => when the current ping was started gpr_timespec ping_start_time; + int inter_ping_delay; + int stable_estimate_count; double bw_est; const char *name; } grpc_bdp_estimator; diff --git a/src/core/lib/transport/error_utils.c b/src/core/lib/transport/error_utils.c index 5e3920b627..2e3b61b7ab 100644 --- a/src/core/lib/transport/error_utils.c +++ b/src/core/lib/transport/error_utils.c @@ -39,8 +39,9 @@ static grpc_error *recursively_find_error_with_field(grpc_error *error, return NULL; } -void grpc_error_get_status(grpc_error *error, gpr_timespec deadline, - grpc_status_code *code, grpc_slice *slice, +void grpc_error_get_status(grpc_exec_ctx *exec_ctx, grpc_error *error, + grpc_millis deadline, grpc_status_code *code, + grpc_slice *slice, grpc_http2_error_code *http_error) { // Start with the parent error and recurse through the tree of children // until we find the first one that has a status code. 
@@ -63,8 +64,8 @@ void grpc_error_get_status(grpc_error *error, gpr_timespec deadline, status = (grpc_status_code)integer; } else if (grpc_error_get_int(found_error, GRPC_ERROR_INT_HTTP2_ERROR, &integer)) { - status = grpc_http2_error_to_grpc_status((grpc_http2_error_code)integer, - deadline); + status = grpc_http2_error_to_grpc_status( + exec_ctx, (grpc_http2_error_code)integer, deadline); } if (code != NULL) *code = status; diff --git a/src/core/lib/transport/error_utils.h b/src/core/lib/transport/error_utils.h index e530884215..6211031a24 100644 --- a/src/core/lib/transport/error_utils.h +++ b/src/core/lib/transport/error_utils.h @@ -20,6 +20,7 @@ #define GRPC_CORE_LIB_TRANSPORT_ERROR_UTILS_H #include "src/core/lib/iomgr/error.h" +#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/transport/http2_errors.h" /// A utility function to get the status code and message to be returned @@ -28,8 +29,9 @@ /// All attributes are pulled from the same child error. If any of the /// attributes (code, msg, http_status) are unneeded, they can be passed as /// NULL. -void grpc_error_get_status(grpc_error *error, gpr_timespec deadline, - grpc_status_code *code, grpc_slice *slice, +void grpc_error_get_status(grpc_exec_ctx *exec_ctx, grpc_error *error, + grpc_millis deadline, grpc_status_code *code, + grpc_slice *slice, grpc_http2_error_code *http_status); /// A utility function to check whether there is a clear status code that diff --git a/src/core/lib/transport/metadata_batch.c b/src/core/lib/transport/metadata_batch.c index 54388bdcda..2df9c9189c 100644 --- a/src/core/lib/transport/metadata_batch.c +++ b/src/core/lib/transport/metadata_batch.c @@ -74,7 +74,7 @@ void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) { void grpc_metadata_batch_init(grpc_metadata_batch *batch) { memset(batch, 0, sizeof(*batch)); - batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + batch->deadline = GRPC_MILLIS_INF_FUTURE; } void grpc_metadata_batch_destroy(grpc_exec_ctx *exec_ctx, @@ -270,9 +270,7 @@ void grpc_metadata_batch_clear(grpc_exec_ctx *exec_ctx, } bool grpc_metadata_batch_is_empty(grpc_metadata_batch *batch) { - return batch->list.head == NULL && - gpr_time_cmp(gpr_inf_future(batch->deadline.clock_type), - batch->deadline) == 0; + return batch->list.head == NULL && batch->deadline == GRPC_MILLIS_INF_FUTURE; } size_t grpc_metadata_batch_size(grpc_metadata_batch *batch) { diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h index 57d298c75c..2adf1881e7 100644 --- a/src/core/lib/transport/metadata_batch.h +++ b/src/core/lib/transport/metadata_batch.h @@ -51,9 +51,9 @@ typedef struct grpc_metadata_batch { grpc_mdelem_list list; grpc_metadata_batch_callouts idx; /** Used to calculate grpc-timeout at the point of sending, - or gpr_inf_future if this batch does not need to send a + or GRPC_MILLIS_INF_FUTURE if this batch does not need to send a grpc-timeout */ - gpr_timespec deadline; + grpc_millis deadline; } grpc_metadata_batch; void grpc_metadata_batch_init(grpc_metadata_batch *batch); diff --git a/src/core/lib/transport/status_conversion.c b/src/core/lib/transport/status_conversion.c index a40d333284..891c4427d7 100644 --- a/src/core/lib/transport/status_conversion.c +++ b/src/core/lib/transport/status_conversion.c @@ -37,8 +37,9 @@ grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status) { } } -grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error, - gpr_timespec deadline) { +grpc_status_code 
grpc_http2_error_to_grpc_status(grpc_exec_ctx *exec_ctx, + grpc_http2_error_code error, + grpc_millis deadline) { switch (error) { case GRPC_HTTP2_NO_ERROR: /* should never be received */ @@ -46,7 +47,7 @@ grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error, case GRPC_HTTP2_CANCEL: /* http2 cancel translates to STATUS_CANCELLED iff deadline hasn't been * exceeded */ - return gpr_time_cmp(gpr_now(deadline.clock_type), deadline) >= 0 + return grpc_exec_ctx_now(exec_ctx) > deadline ? GRPC_STATUS_DEADLINE_EXCEEDED : GRPC_STATUS_CANCELLED; case GRPC_HTTP2_ENHANCE_YOUR_CALM: diff --git a/src/core/lib/transport/status_conversion.h b/src/core/lib/transport/status_conversion.h index e93f3dfd9b..a05ed7658c 100644 --- a/src/core/lib/transport/status_conversion.h +++ b/src/core/lib/transport/status_conversion.h @@ -20,12 +20,14 @@ #define GRPC_CORE_LIB_TRANSPORT_STATUS_CONVERSION_H #include <grpc/grpc.h> +#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/transport/http2_errors.h" /* Conversion of grpc status codes to http2 error codes (for RST_STREAM) */ grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status); -grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error, - gpr_timespec deadline); +grpc_status_code grpc_http2_error_to_grpc_status(grpc_exec_ctx *exec_ctx, + grpc_http2_error_code error, + grpc_millis deadline); /* Conversion of HTTP status codes (:status) to grpc status codes */ grpc_status_code grpc_http2_status_to_grpc_status(int status); diff --git a/src/core/lib/transport/timeout_encoding.c b/src/core/lib/transport/timeout_encoding.c index 02f179d6a3..23a9ef308f 100644 --- a/src/core/lib/transport/timeout_encoding.c +++ b/src/core/lib/transport/timeout_encoding.c @@ -59,60 +59,27 @@ static void enc_seconds(char *buffer, int64_t sec) { } } -static void enc_nanos(char *buffer, int64_t x) { +static void enc_millis(char *buffer, int64_t x) { x = round_up_to_three_sig_figs(x); - if (x < 100000) { - if (x % 1000 == 0) { - enc_ext(buffer, x / 1000, 'u'); - } else { - enc_ext(buffer, x, 'n'); - } - } else if (x < 100000000) { - if (x % 1000000 == 0) { - enc_ext(buffer, x / 1000000, 'm'); - } else { - enc_ext(buffer, x / 1000, 'u'); - } - } else if (x < 1000000000) { - enc_ext(buffer, x / 1000000, 'm'); + if (x < GPR_MS_PER_SEC) { + enc_ext(buffer, x, 'm'); } else { - /* note that this is only ever called with times of less than one second, - so if we reach here the time must have been rounded up to a whole second - (and no more) */ - memcpy(buffer, "1S", 3); - } -} - -static void enc_micros(char *buffer, int64_t x) { - x = round_up_to_three_sig_figs(x); - if (x < 100000) { - if (x % 1000 == 0) { - enc_ext(buffer, x / 1000, 'm'); + if (x % GPR_MS_PER_SEC == 0) { + enc_seconds(buffer, x / GPR_MS_PER_SEC); } else { - enc_ext(buffer, x, 'u'); + enc_ext(buffer, x, 'm'); } - } else if (x < 100000000) { - if (x % 1000000 == 0) { - enc_ext(buffer, x / 1000000, 'S'); - } else { - enc_ext(buffer, x / 1000, 'm'); - } - } else { - enc_ext(buffer, x / 1000000, 'S'); } } -void grpc_http2_encode_timeout(gpr_timespec timeout, char *buffer) { - if (timeout.tv_sec < 0) { +void grpc_http2_encode_timeout(grpc_millis timeout, char *buffer) { + if (timeout <= 0) { enc_tiny(buffer); - } else if (timeout.tv_sec == 0) { - enc_nanos(buffer, timeout.tv_nsec); - } else if (timeout.tv_sec < 1000 && timeout.tv_nsec != 0) { - enc_micros(buffer, - (int64_t)(timeout.tv_sec * 1000000) + - (timeout.tv_nsec / 1000 + (timeout.tv_nsec % 1000 != 0))); + } 
else if (timeout < 1000 * GPR_MS_PER_SEC) { + enc_millis(buffer, timeout); } else { - enc_seconds(buffer, timeout.tv_sec + (timeout.tv_nsec != 0)); + enc_seconds(buffer, + timeout / GPR_MS_PER_SEC + (timeout % GPR_MS_PER_SEC != 0)); } } @@ -121,8 +88,8 @@ static int is_all_whitespace(const char *p, const char *end) { return p == end; } -int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout) { - int32_t x = 0; +int grpc_http2_decode_timeout(grpc_slice text, grpc_millis *timeout) { + grpc_millis x = 0; const uint8_t *p = GRPC_SLICE_START_PTR(text); const uint8_t *end = GRPC_SLICE_END_PTR(text); int have_digit = 0; @@ -136,7 +103,7 @@ int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout) { /* spec allows max. 8 digits, but we allow values up to 1,000,000,000 */ if (x >= (100 * 1000 * 1000)) { if (x != (100 * 1000 * 1000) || digit != 0) { - *timeout = gpr_inf_future(GPR_TIMESPAN); + *timeout = GRPC_MILLIS_INF_FUTURE; return 1; } } @@ -150,22 +117,22 @@ int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout) { /* decode unit specifier */ switch (*p) { case 'n': - *timeout = gpr_time_from_nanos(x, GPR_TIMESPAN); + *timeout = x / GPR_NS_PER_MS + (x % GPR_NS_PER_MS != 0); break; case 'u': - *timeout = gpr_time_from_micros(x, GPR_TIMESPAN); + *timeout = x / GPR_US_PER_MS + (x % GPR_US_PER_MS != 0); break; case 'm': - *timeout = gpr_time_from_millis(x, GPR_TIMESPAN); + *timeout = x; break; case 'S': - *timeout = gpr_time_from_seconds(x, GPR_TIMESPAN); + *timeout = x * GPR_MS_PER_SEC; break; case 'M': - *timeout = gpr_time_from_minutes(x, GPR_TIMESPAN); + *timeout = x * 60 * GPR_MS_PER_SEC; break; case 'H': - *timeout = gpr_time_from_hours(x, GPR_TIMESPAN); + *timeout = x * 60 * 60 * GPR_MS_PER_SEC; break; default: return 0; diff --git a/src/core/lib/transport/timeout_encoding.h b/src/core/lib/transport/timeout_encoding.h index 7ff35c4083..c8677719c4 100644 --- a/src/core/lib/transport/timeout_encoding.h +++ b/src/core/lib/transport/timeout_encoding.h @@ -22,13 +22,14 @@ #include <grpc/slice.h> #include <grpc/support/time.h> +#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/support/string.h" #define GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE (GPR_LTOA_MIN_BUFSIZE + 1) /* Encode/decode timeouts to the GRPC over HTTP/2 format; encoding may round up arbitrarily */ -void grpc_http2_encode_timeout(gpr_timespec timeout, char *buffer); -int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout); +void grpc_http2_encode_timeout(grpc_millis timeout, char *buffer); +int grpc_http2_decode_timeout(grpc_slice text, grpc_millis *timeout); #endif /* GRPC_CORE_LIB_TRANSPORT_TIMEOUT_ENCODING_H */ diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c index 858664715c..eec47b2cfb 100644 --- a/src/core/lib/transport/transport_op_string.c +++ b/src/core/lib/transport/transport_op_string.c @@ -48,10 +48,9 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) { if (m != md.list.head) gpr_strvec_add(b, gpr_strdup(", ")); put_metadata(b, m->md); } - if (gpr_time_cmp(md.deadline, gpr_inf_future(md.deadline.clock_type)) != 0) { + if (md.deadline != GRPC_MILLIS_INF_FUTURE) { char *tmp; - gpr_asprintf(&tmp, " deadline=%" PRId64 ".%09d", md.deadline.tv_sec, - md.deadline.tv_nsec); + gpr_asprintf(&tmp, " deadline=%" PRIdPTR, md.deadline); gpr_strvec_add(b, tmp); } } diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py index ec642b0520..859584a413 
100644 --- a/src/python/grpcio/grpc_core_dependencies.py +++ b/src/python/grpcio/grpc_core_dependencies.py @@ -21,7 +21,6 @@ CORE_SOURCE_FILES = [ 'src/core/lib/support/arena.c', 'src/core/lib/support/atm.c', 'src/core/lib/support/avl.c', - 'src/core/lib/support/backoff.c', 'src/core/lib/support/cmdline.c', 'src/core/lib/support/cpu_iphone.c', 'src/core/lib/support/cpu_linux.c', @@ -62,6 +61,7 @@ CORE_SOURCE_FILES = [ 'src/core/lib/support/tmpfile_windows.c', 'src/core/lib/support/wrap_memcpy.c', 'src/core/lib/surface/init.c', + 'src/core/lib/backoff/backoff.c', 'src/core/lib/channel/channel_args.c', 'src/core/lib/channel/channel_stack.c', 'src/core/lib/channel/channel_stack_builder.c', diff --git a/test/core/backoff/BUILD b/test/core/backoff/BUILD new file mode 100644 index 0000000000..4ae762007c --- /dev/null +++ b/test/core/backoff/BUILD @@ -0,0 +1,36 @@ +# Copyright 2016 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary") + +licenses(["notice"]) # Apache v2 + +package( + features = [ + "-layering_check", + "-parse_headers", + ], +) + +grpc_cc_test( + name = "backoff_test", + srcs = ["backoff_test.c"], + language = "C", + deps = [ + "//:grpc", + "//test/core/util:grpc_test_util", + "//:gpr", + "//test/core/util:gpr_test_util", + ], +) diff --git a/test/core/backoff/backoff_test.c b/test/core/backoff/backoff_test.c new file mode 100644 index 0000000000..3848b2a54d --- /dev/null +++ b/test/core/backoff/backoff_test.c @@ -0,0 +1,149 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include "src/core/lib/backoff/backoff.h" + +#include <grpc/support/log.h> + +#include "test/core/util/test_config.h" + +static void test_constant_backoff(void) { + grpc_backoff backoff; + grpc_backoff_init(&backoff, 200 /* initial timeout */, 1.0 /* multiplier */, + 0.0 /* jitter */, 100 /* min timeout */, + 1000 /* max timeout */); + + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_millis next = grpc_backoff_begin(&exec_ctx, &backoff); + GPR_ASSERT(next - grpc_exec_ctx_now(&exec_ctx) == 200); + for (int i = 0; i < 10000; i++) { + next = grpc_backoff_step(&exec_ctx, &backoff); + GPR_ASSERT(next - grpc_exec_ctx_now(&exec_ctx) == 200); + exec_ctx.now = next; + } + grpc_exec_ctx_finish(&exec_ctx); +} + +static void test_min_connect(void) { + grpc_backoff backoff; + grpc_backoff_init(&backoff, 100 /* initial timeout */, 1.0 /* multiplier */, + 0.0 /* jitter */, 200 /* min timeout */, + 1000 /* max timeout */); + + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_millis next = grpc_backoff_begin(&exec_ctx, &backoff); + GPR_ASSERT(next - grpc_exec_ctx_now(&exec_ctx) == 200); + grpc_exec_ctx_finish(&exec_ctx); +} + +static void test_no_jitter_backoff(void) { + grpc_backoff backoff; + grpc_backoff_init(&backoff, 2 /* initial timeout */, 2.0 /* multiplier */, + 0.0 /* jitter */, 1 /* min timeout */, + 513 /* max timeout */); + // x_1 = 2 + // x_n = 2**i + x_{i-1} ( = 2**(n+1) - 2 ) + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + exec_ctx.now = 0; + exec_ctx.now_is_valid = true; + grpc_millis next = grpc_backoff_begin(&exec_ctx, &backoff); + GPR_ASSERT(next == 2); + exec_ctx.now = next; + next = grpc_backoff_step(&exec_ctx, &backoff); + GPR_ASSERT(next == 6); + exec_ctx.now = next; + next = grpc_backoff_step(&exec_ctx, &backoff); + GPR_ASSERT(next == 14); + exec_ctx.now = next; + next = grpc_backoff_step(&exec_ctx, &backoff); + GPR_ASSERT(next == 30); + exec_ctx.now = next; + next = grpc_backoff_step(&exec_ctx, &backoff); + GPR_ASSERT(next == 62); + exec_ctx.now = next; + next = grpc_backoff_step(&exec_ctx, &backoff); + GPR_ASSERT(next == 126); + exec_ctx.now = next; + next = grpc_backoff_step(&exec_ctx, &backoff); + GPR_ASSERT(next == 254); + exec_ctx.now = next; + next = grpc_backoff_step(&exec_ctx, &backoff); + GPR_ASSERT(next == 510); + exec_ctx.now = next; + next = grpc_backoff_step(&exec_ctx, &backoff); + GPR_ASSERT(next == 1022); + exec_ctx.now = next; + next = grpc_backoff_step(&exec_ctx, &backoff); + // Hit the maximum timeout. From this point onwards, retries will increase + // only by max timeout. 
+ GPR_ASSERT(next == 1535); + exec_ctx.now = next; + next = grpc_backoff_step(&exec_ctx, &backoff); + GPR_ASSERT(next == 2048); + exec_ctx.now = next; + next = grpc_backoff_step(&exec_ctx, &backoff); + GPR_ASSERT(next == 2561); + grpc_exec_ctx_finish(&exec_ctx); +} + +static void test_jitter_backoff(void) { + const int64_t initial_timeout = 500; + const double jitter = 0.1; + grpc_backoff backoff; + grpc_backoff_init(&backoff, initial_timeout, 1.0 /* multiplier */, jitter, + 100 /* min timeout */, 1000 /* max timeout */); + + backoff.rng_state = 0; // force consistent PRNG + + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_millis next = grpc_backoff_begin(&exec_ctx, &backoff); + GPR_ASSERT(next - grpc_exec_ctx_now(&exec_ctx) == 500); + + int64_t expected_next_lower_bound = + (int64_t)((double)initial_timeout * (1 - jitter)); + int64_t expected_next_upper_bound = + (int64_t)((double)initial_timeout * (1 + jitter)); + + for (int i = 0; i < 10000; i++) { + next = grpc_backoff_step(&exec_ctx, &backoff); + + // next-now must be within (jitter*100)% of the previous timeout. + const int64_t timeout_millis = next - grpc_exec_ctx_now(&exec_ctx); + GPR_ASSERT(timeout_millis >= expected_next_lower_bound); + GPR_ASSERT(timeout_millis <= expected_next_upper_bound); + + expected_next_lower_bound = + (int64_t)((double)timeout_millis * (1 - jitter)); + expected_next_upper_bound = + (int64_t)((double)timeout_millis * (1 + jitter)); + exec_ctx.now = next; + } + grpc_exec_ctx_finish(&exec_ctx); +} + +int main(int argc, char **argv) { + grpc_test_init(argc, argv); + gpr_time_init(); + + test_constant_backoff(); + test_min_connect(); + test_no_jitter_backoff(); + test_jitter_backoff(); + + return 0; +} diff --git a/test/core/channel/channel_stack_test.c b/test/core/channel/channel_stack_test.c index 7c3614b4a2..a07ef89ba8 100644 --- a/test/core/channel/channel_stack_test.c +++ b/test/core/channel/channel_stack_test.c @@ -125,7 +125,7 @@ static void test_create_channel_stack(void) { .context = NULL, .path = path, .start_time = gpr_now(GPR_CLOCK_MONOTONIC), - .deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC), + .deadline = GRPC_MILLIS_INF_FUTURE, .arena = NULL}; grpc_error *error = grpc_call_stack_init(&exec_ctx, channel_stack, 1, free_call, call_stack, &args); diff --git a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c index 364e180963..4597285063 100644 --- a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c +++ b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c @@ -106,7 +106,7 @@ static bool wait_loop(int deadline_seconds, gpr_event *ev) { deadline_seconds--; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_timer_check(&exec_ctx, gpr_now(GPR_CLOCK_MONOTONIC), NULL); + grpc_timer_check(&exec_ctx, NULL); grpc_exec_ctx_finish(&exec_ctx); } return false; diff --git a/test/core/end2end/fixtures/h2_ssl_cert.c b/test/core/end2end/fixtures/h2_ssl_cert.c index 9b1ddadfe4..f0a2ee5430 100644 --- a/test/core/end2end/fixtures/h2_ssl_cert.c +++ b/test/core/end2end/fixtures/h2_ssl_cert.c @@ -319,7 +319,7 @@ static void simple_request_body(grpc_end2end_test_fixture f, op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; - op->flags = 0; + op->flags = GRPC_INITIAL_METADATA_WAIT_FOR_READY; op->reserved = NULL; op++; error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL); diff --git a/test/core/end2end/fixtures/http_proxy_fixture.c 
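/*
 * Illustrative sketch, not part of the patch: the arithmetic behind the
 * deadlines asserted in test_no_jitter_backoff above. With multiplier 2.0 and
 * no jitter, each step's timeout doubles until it is clamped at the 513ms
 * max, and because the test advances exec_ctx.now to each returned deadline,
 * the deadlines are running sums: 2, 6, 14, ..., 1022, then +513 per step.
 * Standalone example code, independent of the gRPC backoff API.
 */
#include <assert.h>
#include <stdint.h>

int main(void) {
  const double multiplier = 2.0;
  const double max_timeout = 513;
  const int64_t expected[] = {2,   6,    14,   30,   62,   126, 254,
                              510, 1022, 1535, 2048, 2561};
  int64_t now = 0;    /* the test's fake exec_ctx.now, in millis */
  double timeout = 2; /* initial timeout passed to grpc_backoff_init */
  for (int i = 0; i < 12; i++) {
    int64_t deadline = now + (int64_t)timeout;
    assert(deadline == expected[i]);
    now = deadline;                                   /* exec_ctx.now = next */
    timeout *= multiplier;                            /* exponential growth  */
    if (timeout > max_timeout) timeout = max_timeout; /* clamp at max        */
  }
  return 0;
}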
b/test/core/end2end/fixtures/http_proxy_fixture.c index a4cfc77bcb..d29401fdc3 100644 --- a/test/core/end2end/fixtures/http_proxy_fixture.c +++ b/test/core/end2end/fixtures/http_proxy_fixture.c @@ -412,8 +412,8 @@ static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg, GPR_ASSERT(resolved_addresses->naddrs >= 1); // Connect to requested address. // The connection callback inherits our reference to conn. - const gpr_timespec deadline = gpr_time_add( - gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(10, GPR_TIMESPAN)); + const grpc_millis deadline = + grpc_exec_ctx_now(exec_ctx) + 10 * GPR_MS_PER_SEC; grpc_tcp_client_connect(exec_ctx, &conn->on_server_connect_done, &conn->server_endpoint, conn->pollset_set, NULL, &resolved_addresses->addrs[0], deadline); @@ -469,14 +469,12 @@ static void thread_main(void* arg) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; do { gpr_ref(&proxy->users); - const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - const gpr_timespec deadline = - gpr_time_add(now, gpr_time_from_seconds(1, GPR_TIMESPAN)); grpc_pollset_worker* worker = NULL; gpr_mu_lock(proxy->mu); GRPC_LOG_IF_ERROR( "grpc_pollset_work", - grpc_pollset_work(&exec_ctx, proxy->pollset, &worker, now, deadline)); + grpc_pollset_work(&exec_ctx, proxy->pollset, &worker, + grpc_exec_ctx_now(&exec_ctx) + GPR_MS_PER_SEC)); gpr_mu_unlock(proxy->mu); grpc_exec_ctx_flush(&exec_ctx); } while (!gpr_unref(&proxy->users)); diff --git a/test/core/end2end/fuzzers/api_fuzzer.c b/test/core/end2end/fuzzers/api_fuzzer.c index 1228c9fe9a..0a787bbf30 100644 --- a/test/core/end2end/fuzzers/api_fuzzer.c +++ b/test/core/end2end/fuzzers/api_fuzzer.c @@ -60,7 +60,9 @@ extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type); static gpr_timespec now_impl(gpr_clock_type clock_type) { GPR_ASSERT(clock_type != GPR_TIMESPAN); - return g_now; + gpr_timespec ts = g_now; + ts.clock_type = clock_type; + return ts; } //////////////////////////////////////////////////////////////////////////////// @@ -407,10 +409,8 @@ void my_resolve_address(grpc_exec_ctx *exec_ctx, const char *addr, r->addrs = addresses; r->lb_addrs = NULL; grpc_timer_init( - exec_ctx, &r->timer, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_seconds(1, GPR_TIMESPAN)), - GRPC_CLOSURE_CREATE(finish_resolve, r, grpc_schedule_on_exec_ctx), - gpr_now(GPR_CLOCK_MONOTONIC)); + exec_ctx, &r->timer, GPR_MS_PER_SEC + grpc_exec_ctx_now(exec_ctx), + GRPC_CLOSURE_CREATE(finish_resolve, r, grpc_schedule_on_exec_ctx)); } grpc_ares_request *my_dns_lookup_ares( @@ -424,10 +424,8 @@ grpc_ares_request *my_dns_lookup_ares( r->addrs = NULL; r->lb_addrs = lb_addrs; grpc_timer_init( - exec_ctx, &r->timer, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_seconds(1, GPR_TIMESPAN)), - GRPC_CLOSURE_CREATE(finish_resolve, r, grpc_schedule_on_exec_ctx), - gpr_now(GPR_CLOCK_MONOTONIC)); + exec_ctx, &r->timer, GPR_MS_PER_SEC + grpc_exec_ctx_now(exec_ctx), + GRPC_CLOSURE_CREATE(finish_resolve, r, grpc_schedule_on_exec_ctx)); return NULL; } @@ -487,10 +485,8 @@ static void sched_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure, fc->ep = ep; fc->deadline = deadline; grpc_timer_init( - exec_ctx, &fc->timer, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_millis(1, GPR_TIMESPAN)), - GRPC_CLOSURE_CREATE(do_connect, fc, grpc_schedule_on_exec_ctx), - gpr_now(GPR_CLOCK_MONOTONIC)); + exec_ctx, &fc->timer, GPR_MS_PER_SEC + grpc_exec_ctx_now(exec_ctx), + GRPC_CLOSURE_CREATE(do_connect, fc, grpc_schedule_on_exec_ctx)); } static void 
my_tcp_client_connect(grpc_exec_ctx *exec_ctx, diff --git a/test/core/end2end/invalid_call_argument_test.c b/test/core/end2end/invalid_call_argument_test.c index bf0d08adec..e3fd5a8fbe 100644 --- a/test/core/end2end/invalid_call_argument_test.c +++ b/test/core/end2end/invalid_call_argument_test.c @@ -92,7 +92,7 @@ static void prepare_test(int is_client) { op = g_state.ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; - op->flags = 0; + op->flags = GRPC_INITIAL_METADATA_WAIT_FOR_READY; op->reserved = NULL; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(g_state.call, g_state.ops, diff --git a/test/core/end2end/tests/bad_ping.c b/test/core/end2end/tests/bad_ping.c index c97d11b306..d442f12480 100644 --- a/test/core/end2end/tests/bad_ping.c +++ b/test/core/end2end/tests/bad_ping.c @@ -155,14 +155,16 @@ static void test_bad_ping(grpc_end2end_test_config config) { cq_verify(cqv); // Send too many pings to the server to trigger the punishment: - // The first ping is sent after data frames, it won't trigger a ping strike. - // Each of the following pings will trigger a ping strike, and we need at - // least (MAX_PING_STRIKES + 1) strikes to trigger the punishment. So - // (MAX_PING_STRIKES + 2) pings are needed here. + // Each ping will trigger a ping strike, and we need at least MAX_PING_STRIKES + // strikes to trigger the punishment. So (MAX_PING_STRIKES + 1) pings are + // needed here. int i; - for (i = 200; i < 202 + MAX_PING_STRIKES; i++) { - grpc_channel_ping(f.client, f.cq, tag(i), NULL); - CQ_EXPECT_COMPLETION(cqv, tag(i), 1); + for (i = 1; i <= MAX_PING_STRIKES + 1; i++) { + grpc_channel_ping(f.client, f.cq, tag(200 + i), NULL); + CQ_EXPECT_COMPLETION(cqv, tag(200 + i), 1); + if (i == MAX_PING_STRIKES + 1) { + CQ_EXPECT_COMPLETION(cqv, tag(1), 1); + } cq_verify(cqv); } @@ -190,7 +192,6 @@ static void test_bad_ping(grpc_end2end_test_config config) { GPR_ASSERT(GRPC_CALL_OK == error); CQ_EXPECT_COMPLETION(cqv, tag(102), 1); - CQ_EXPECT_COMPLETION(cqv, tag(1), 1); cq_verify(cqv); grpc_server_shutdown_and_notify(f.server, f.cq, tag(0xdead)); diff --git a/test/core/end2end/tests/keepalive_timeout.c b/test/core/end2end/tests/keepalive_timeout.c index 8d01f23c00..c4280149c7 100644 --- a/test/core/end2end/tests/keepalive_timeout.c +++ b/test/core/end2end/tests/keepalive_timeout.c @@ -98,21 +98,21 @@ static void test_keepalive_timeout(grpc_end2end_test_config config) { grpc_byte_buffer *response_payload = grpc_raw_byte_buffer_create(&response_payload_slice, 1); - grpc_arg keepalive_args[] = {{.type = GRPC_ARG_INTEGER, - .key = GRPC_ARG_KEEPALIVE_TIME_MS, - .value.integer = 1500}, - {.type = GRPC_ARG_INTEGER, - .key = GRPC_ARG_KEEPALIVE_TIMEOUT_MS, - .value.integer = 0}, - {.type = GRPC_ARG_INTEGER, - .key = GRPC_ARG_HTTP2_BDP_PROBE, - .value.integer = 0}}; - - grpc_channel_args client_args = {.num_args = GPR_ARRAY_SIZE(keepalive_args), - .args = keepalive_args}; + grpc_arg keepalive_arg_elems[] = {{.type = GRPC_ARG_INTEGER, + .key = GRPC_ARG_KEEPALIVE_TIME_MS, + .value.integer = 1500}, + {.type = GRPC_ARG_INTEGER, + .key = GRPC_ARG_KEEPALIVE_TIMEOUT_MS, + .value.integer = 0}, + {.type = GRPC_ARG_INTEGER, + .key = GRPC_ARG_HTTP2_BDP_PROBE, + .value.integer = 0}}; + grpc_channel_args keepalive_args = { + .num_args = GPR_ARRAY_SIZE(keepalive_arg_elems), + .args = keepalive_arg_elems}; grpc_end2end_test_fixture f = - begin_test(config, "keepalive_timeout", &client_args, NULL); + begin_test(config, "keepalive_timeout", &keepalive_args, NULL); cq_verifier 
*cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; diff --git a/test/core/http/httpcli_test.c b/test/core/http/httpcli_test.c index 8a53903763..cc1c16d695 100644 --- a/test/core/http/httpcli_test.c +++ b/test/core/http/httpcli_test.c @@ -35,8 +35,9 @@ static grpc_httpcli_context g_context; static gpr_mu *g_mu; static grpc_polling_entity g_pops; -static gpr_timespec n_seconds_time(int seconds) { - return grpc_timeout_seconds_to_deadline(seconds); +static grpc_millis n_seconds_time(int seconds) { + return grpc_timespec_to_millis_round_up( + grpc_timeout_seconds_to_deadline(seconds)); } static void on_finish(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { @@ -86,8 +87,7 @@ static void test_get(int port) { GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops), - &worker, gpr_now(GPR_CLOCK_MONOTONIC), - n_seconds_time(1)))); + &worker, n_seconds_time(1)))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); @@ -128,8 +128,7 @@ static void test_post(int port) { GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops), - &worker, gpr_now(GPR_CLOCK_MONOTONIC), - n_seconds_time(1)))); + &worker, n_seconds_time(1)))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); diff --git a/test/core/http/httpscli_test.c b/test/core/http/httpscli_test.c index c7455bd8df..f8a3cfdd76 100644 --- a/test/core/http/httpscli_test.c +++ b/test/core/http/httpscli_test.c @@ -35,8 +35,9 @@ static grpc_httpcli_context g_context; static gpr_mu *g_mu; static grpc_polling_entity g_pops; -static gpr_timespec n_seconds_time(int seconds) { - return grpc_timeout_seconds_to_deadline(seconds); +static grpc_millis n_seconds_time(int seconds) { + return grpc_timespec_to_millis_round_up( + grpc_timeout_seconds_to_deadline(seconds)); } static void on_finish(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { @@ -87,8 +88,7 @@ static void test_get(int port) { GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops), - &worker, gpr_now(GPR_CLOCK_MONOTONIC), - n_seconds_time(1)))); + &worker, n_seconds_time(1)))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); @@ -130,8 +130,7 @@ static void test_post(int port) { GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops), - &worker, gpr_now(GPR_CLOCK_MONOTONIC), - n_seconds_time(1)))); + &worker, n_seconds_time(1)))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); diff --git a/test/core/iomgr/endpoint_tests.c b/test/core/iomgr/endpoint_tests.c index f8570edde7..61e901f645 100644 --- a/test/core/iomgr/endpoint_tests.c +++ b/test/core/iomgr/endpoint_tests.c @@ -176,10 +176,11 @@ static void read_and_write_test(grpc_endpoint_test_config config, size_t num_bytes, size_t write_size, size_t slice_size, bool shutdown) { struct read_and_write_test_state state; - gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20); grpc_endpoint_test_fixture f = begin_test(config, "read_and_write_test", slice_size); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_millis deadline = + grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20)); gpr_log(GPR_DEBUG, "num_bytes=%" PRIuPTR " write_size=%" PRIuPTR " slice_size=%" PRIuPTR " shutdown=%d", num_bytes, write_size, slice_size, shutdown); @@ -235,11 +236,10 @@ static void 
read_and_write_test(grpc_endpoint_test_config config, gpr_mu_lock(g_mu); while (!state.read_done || !state.write_done) { grpc_pollset_worker *worker = NULL; - GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0); + GPR_ASSERT(grpc_exec_ctx_now(&exec_ctx) < deadline); GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline))); + grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline))); } gpr_mu_unlock(g_mu); grpc_exec_ctx_flush(&exec_ctx); @@ -265,14 +265,14 @@ static void wait_for_fail_count(grpc_exec_ctx *exec_ctx, int *fail_count, int want_fail_count) { grpc_exec_ctx_flush(exec_ctx); gpr_mu_lock(g_mu); - gpr_timespec deadline = grpc_timeout_seconds_to_deadline(10); - while (gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0 && + grpc_millis deadline = + grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10)); + while (grpc_exec_ctx_now(exec_ctx) < deadline && *fail_count < want_fail_count) { grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", - grpc_pollset_work(exec_ctx, g_pollset, &worker, - gpr_now(deadline.clock_type), deadline))); + grpc_pollset_work(exec_ctx, g_pollset, &worker, deadline))); gpr_mu_unlock(g_mu); grpc_exec_ctx_flush(exec_ctx); gpr_mu_lock(g_mu); diff --git a/test/core/iomgr/ev_epollsig_linux_test.c b/test/core/iomgr/ev_epollsig_linux_test.c index cca07bf002..37aadacd49 100644 --- a/test/core/iomgr/ev_epollsig_linux_test.c +++ b/test/core/iomgr/ev_epollsig_linux_test.c @@ -238,10 +238,8 @@ static void test_threading_loop(void *arg) { grpc_pollset_worker *worker; gpr_mu_lock(shared->mu); GPR_ASSERT(GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(&exec_ctx, shared->pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), - gpr_inf_future(GPR_CLOCK_MONOTONIC)))); + "pollset_work", grpc_pollset_work(&exec_ctx, shared->pollset, &worker, + GRPC_MILLIS_INF_FUTURE))); gpr_mu_unlock(shared->mu); grpc_exec_ctx_finish(&exec_ctx); } diff --git a/test/core/iomgr/fd_posix_test.c b/test/core/iomgr/fd_posix_test.c index 881277a8d6..1c62f34d3e 100644 --- a/test/core/iomgr/fd_posix_test.c +++ b/test/core/iomgr/fd_posix_test.c @@ -252,10 +252,8 @@ static void server_wait_and_shutdown(server *sv) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), - gpr_inf_future(GPR_CLOCK_MONOTONIC)))); + "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, + GRPC_MILLIS_INF_FUTURE))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); @@ -371,10 +369,8 @@ static void client_wait_and_shutdown(client *cl) { grpc_pollset_worker *worker = NULL; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; GPR_ASSERT(GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), - gpr_inf_future(GPR_CLOCK_MONOTONIC)))); + "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, + GRPC_MILLIS_INF_FUTURE))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); @@ -477,10 +473,8 @@ static void test_grpc_fd_change(void) { while (a.cb_that_ran == NULL) { grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), - gpr_inf_future(GPR_CLOCK_MONOTONIC)))); + "pollset_work", 
grpc_pollset_work(&exec_ctx, g_pollset, &worker, + GRPC_MILLIS_INF_FUTURE))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); @@ -503,10 +497,8 @@ static void test_grpc_fd_change(void) { while (b.cb_that_ran == NULL) { grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), - gpr_inf_future(GPR_CLOCK_MONOTONIC)))); + "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, + GRPC_MILLIS_INF_FUTURE))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); diff --git a/test/core/iomgr/pollset_set_test.c b/test/core/iomgr/pollset_set_test.c index 70efca8b16..dee526c22a 100644 --- a/test/core/iomgr/pollset_set_test.c +++ b/test/core/iomgr/pollset_set_test.c @@ -204,7 +204,7 @@ static void pollset_set_test_basic() { */ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_pollset_worker *worker; - gpr_timespec deadline; + grpc_millis deadline; test_fd tfds[10]; test_pollset pollsets[3]; @@ -257,10 +257,10 @@ static void pollset_set_test_basic() { make_test_fds_readable(tfds, num_fds); gpr_mu_lock(pollsets[i].mu); - deadline = grpc_timeout_milliseconds_to_deadline(2); + deadline = grpc_timespec_to_millis_round_up( + grpc_timeout_milliseconds_to_deadline(2)); GPR_ASSERT(GRPC_ERROR_NONE == - grpc_pollset_work(&exec_ctx, pollsets[i].ps, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline)); + grpc_pollset_work(&exec_ctx, pollsets[i].ps, &worker, deadline)); gpr_mu_unlock(pollsets[i].mu); grpc_exec_ctx_flush(&exec_ctx); @@ -309,7 +309,7 @@ void pollset_set_test_dup_fds() { */ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_pollset_worker *worker; - gpr_timespec deadline; + grpc_millis deadline; test_fd tfds[3]; test_pollset pollset; @@ -339,10 +339,10 @@ void pollset_set_test_dup_fds() { make_test_fds_readable(tfds, num_fds); gpr_mu_lock(pollset.mu); - deadline = grpc_timeout_milliseconds_to_deadline(2); + deadline = grpc_timespec_to_millis_round_up( + grpc_timeout_milliseconds_to_deadline(2)); GPR_ASSERT(GRPC_ERROR_NONE == - grpc_pollset_work(&exec_ctx, pollset.ps, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline)); + grpc_pollset_work(&exec_ctx, pollset.ps, &worker, deadline)); gpr_mu_unlock(pollset.mu); grpc_exec_ctx_flush(&exec_ctx); @@ -382,7 +382,7 @@ void pollset_set_test_empty_pollset() { */ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_pollset_worker *worker; - gpr_timespec deadline; + grpc_millis deadline; test_fd tfds[3]; test_pollset pollsets[2]; @@ -408,10 +408,10 @@ void pollset_set_test_empty_pollset() { make_test_fds_readable(tfds, num_fds); gpr_mu_lock(pollsets[0].mu); - deadline = grpc_timeout_milliseconds_to_deadline(2); + deadline = grpc_timespec_to_millis_round_up( + grpc_timeout_milliseconds_to_deadline(2)); GPR_ASSERT(GRPC_ERROR_NONE == - grpc_pollset_work(&exec_ctx, pollsets[0].ps, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline)); + grpc_pollset_work(&exec_ctx, pollsets[0].ps, &worker, deadline)); gpr_mu_unlock(pollsets[0].mu); grpc_exec_ctx_flush(&exec_ctx); diff --git a/test/core/iomgr/resolve_address_posix_test.c b/test/core/iomgr/resolve_address_posix_test.c index e4be99f03c..cb9d6080fb 100644 --- a/test/core/iomgr/resolve_address_posix_test.c +++ b/test/core/iomgr/resolve_address_posix_test.c @@ -72,35 +72,33 @@ void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) { gpr_free(args->pollset); } -static gpr_timespec n_sec_deadline(int seconds) { - return gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - 
gpr_time_from_seconds(seconds, GPR_TIMESPAN)); +static grpc_millis n_sec_deadline(int seconds) { + return grpc_timespec_to_millis_round_up( + grpc_timeout_seconds_to_deadline(seconds)); } static void actually_poll(void *argsp) { args_struct *args = argsp; - gpr_timespec deadline = n_sec_deadline(10); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_millis deadline = n_sec_deadline(10); while (true) { bool done = gpr_atm_acq_load(&args->done_atm) != 0; if (done) { break; } - gpr_timespec time_left = - gpr_time_sub(deadline, gpr_now(GPR_CLOCK_REALTIME)); - gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64 ".%09d", done, - time_left.tv_sec, time_left.tv_nsec); - GPR_ASSERT(gpr_time_cmp(time_left, gpr_time_0(GPR_TIMESPAN)) >= 0); + grpc_millis time_left = deadline - grpc_exec_ctx_now(&exec_ctx); + gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRIdPTR, done, time_left); + GPR_ASSERT(time_left >= 0); grpc_pollset_worker *worker = NULL; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_mu_lock(args->mu); - GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(&exec_ctx, args->pollset, &worker, - gpr_now(GPR_CLOCK_REALTIME), n_sec_deadline(1))); + GRPC_LOG_IF_ERROR("pollset_work", + grpc_pollset_work(&exec_ctx, args->pollset, &worker, + n_sec_deadline(1))); gpr_mu_unlock(args->mu); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_flush(&exec_ctx); } gpr_event_set(&args->ev, (void *)1); + grpc_exec_ctx_finish(&exec_ctx); } static void poll_pollset_until_request_done(args_struct *args) { diff --git a/test/core/iomgr/resolve_address_test.c b/test/core/iomgr/resolve_address_test.c index 1110c04b6e..178bbbb95f 100644 --- a/test/core/iomgr/resolve_address_test.c +++ b/test/core/iomgr/resolve_address_test.c @@ -68,34 +68,32 @@ void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) { gpr_free(args->pollset); } -static gpr_timespec n_sec_deadline(int seconds) { - return gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_seconds(seconds, GPR_TIMESPAN)); +static grpc_millis n_sec_deadline(int seconds) { + return grpc_timespec_to_millis_round_up( + grpc_timeout_seconds_to_deadline(seconds)); } static void poll_pollset_until_request_done(args_struct *args) { - gpr_timespec deadline = n_sec_deadline(10); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_millis deadline = n_sec_deadline(10); while (true) { bool done = gpr_atm_acq_load(&args->done_atm) != 0; if (done) { break; } - gpr_timespec time_left = - gpr_time_sub(deadline, gpr_now(GPR_CLOCK_REALTIME)); - gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64 ".%09d", done, - time_left.tv_sec, time_left.tv_nsec); - GPR_ASSERT(gpr_time_cmp(time_left, gpr_time_0(GPR_TIMESPAN)) >= 0); + grpc_millis time_left = deadline - grpc_exec_ctx_now(&exec_ctx); + gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRIdPTR, done, time_left); + GPR_ASSERT(time_left >= 0); grpc_pollset_worker *worker = NULL; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_mu_lock(args->mu); - GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(&exec_ctx, args->pollset, &worker, - gpr_now(GPR_CLOCK_REALTIME), n_sec_deadline(1))); + GRPC_LOG_IF_ERROR("pollset_work", + grpc_pollset_work(&exec_ctx, args->pollset, &worker, + n_sec_deadline(1))); gpr_mu_unlock(args->mu); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_flush(&exec_ctx); } gpr_event_set(&args->ev, (void *)1); + grpc_exec_ctx_finish(&exec_ctx); } static void must_succeed(grpc_exec_ctx *exec_ctx, void *argsp, diff --git a/test/core/iomgr/tcp_client_posix_test.c b/test/core/iomgr/tcp_client_posix_test.c index 
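/*
 * Illustrative sketch, not part of the patch: the wait-loop shape the tests
 * above converge on now that grpc_pollset_work() takes a single grpc_millis
 * deadline and "now" comes from the exec_ctx. The helper name and the pred
 * callback are hypothetical; the gRPC calls and the timespec-to-millis
 * conversion mirror the test code, and the same includes as those tests are
 * assumed.
 */
static void wait_until(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       gpr_mu *mu, bool (*pred)(void *arg), void *arg,
                       int timeout_seconds) {
  const grpc_millis deadline = grpc_timespec_to_millis_round_up(
      grpc_timeout_seconds_to_deadline(timeout_seconds));
  gpr_mu_lock(mu);
  while (!pred(arg) && grpc_exec_ctx_now(exec_ctx) < deadline) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(exec_ctx, pollset, &worker, deadline)));
    /* Run scheduled closures outside the pollset lock, as the tests do. */
    gpr_mu_unlock(mu);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(mu);
  }
  gpr_mu_unlock(mu);
}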
1032da942b..b8b76d1c42 100644 --- a/test/core/iomgr/tcp_client_posix_test.c +++ b/test/core/iomgr/tcp_client_posix_test.c @@ -46,8 +46,8 @@ static grpc_pollset *g_pollset; static int g_connections_complete = 0; static grpc_endpoint *g_connecting = NULL; -static gpr_timespec test_deadline(void) { - return grpc_timeout_seconds_to_deadline(10); +static grpc_millis test_deadline(void) { + return grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10)); } static void finish_connection() { @@ -109,7 +109,7 @@ void test_succeeds(void) { (socklen_t *)&resolved_addr.len) == 0); GRPC_CLOSURE_INIT(&done, must_succeed, NULL, grpc_schedule_on_exec_ctx); grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL, - &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME)); + &resolved_addr, GRPC_MILLIS_INF_FUTURE); /* await the connection */ do { @@ -127,8 +127,8 @@ void test_succeeds(void) { GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), - grpc_timeout_seconds_to_deadline(5)))); + grpc_timespec_to_millis_round_up( + grpc_timeout_seconds_to_deadline(5))))); gpr_mu_unlock(g_mu); grpc_exec_ctx_flush(&exec_ctx); gpr_mu_lock(g_mu); @@ -159,25 +159,24 @@ void test_fails(void) { /* connect to a broken address */ GRPC_CLOSURE_INIT(&done, must_fail, NULL, grpc_schedule_on_exec_ctx); grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL, - &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME)); + &resolved_addr, GRPC_MILLIS_INF_FUTURE); gpr_mu_lock(g_mu); /* wait for the connection callback to finish */ while (g_connections_complete == connections_complete_before) { grpc_pollset_worker *worker = NULL; - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec polling_deadline = test_deadline(); - switch (grpc_timer_check(&exec_ctx, now, &polling_deadline)) { + grpc_millis polling_deadline = test_deadline(); + switch (grpc_timer_check(&exec_ctx, &polling_deadline)) { case GRPC_TIMERS_FIRED: break; case GRPC_TIMERS_NOT_CHECKED: - polling_deadline = now; + polling_deadline = 0; /* fall through */ case GRPC_TIMERS_CHECKED_AND_EMPTY: GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, - now, polling_deadline))); + polling_deadline))); break; } gpr_mu_unlock(g_mu); diff --git a/test/core/iomgr/tcp_posix_test.c b/test/core/iomgr/tcp_posix_test.c index cfb3cf897c..6501160c6f 100644 --- a/test/core/iomgr/tcp_posix_test.c +++ b/test/core/iomgr/tcp_posix_test.c @@ -162,7 +162,8 @@ static void read_test(size_t num_bytes, size_t slice_size) { grpc_endpoint *ep; struct read_socket_state state; size_t written_bytes; - gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20); + grpc_millis deadline = + grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20)); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_INFO, "Read test of size %" PRIuPTR ", slice size %" PRIuPTR, @@ -194,8 +195,7 @@ static void read_test(size_t num_bytes, size_t slice_size) { grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline))); + grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); @@ -215,7 +215,8 @@ static void large_read_test(size_t slice_size) { grpc_endpoint *ep; struct read_socket_state state; ssize_t written_bytes; - gpr_timespec deadline = 
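/*
 * Illustrative sketch, not part of the patch: the "check timers, then poll"
 * loop used by test_fails above after grpc_timer_check() lost its explicit
 * now argument. grpc_timer_check() may lower polling_deadline to the next
 * timer expiry; GRPC_TIMERS_NOT_CHECKED means another thread holds the timer
 * check, so the loop polls with a zero deadline and retries. The wrapper name
 * and the done flag are hypothetical; includes are assumed from the test.
 */
static void poll_until_done(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                            gpr_mu *mu, const bool *done,
                            grpc_millis overall_deadline) {
  gpr_mu_lock(mu);
  while (!*done) {
    grpc_pollset_worker *worker = NULL;
    grpc_millis polling_deadline = overall_deadline;
    switch (grpc_timer_check(exec_ctx, &polling_deadline)) {
      case GRPC_TIMERS_FIRED:
        break; /* timer closures were scheduled; flushed below */
      case GRPC_TIMERS_NOT_CHECKED:
        polling_deadline = 0;
      /* fall through */
      case GRPC_TIMERS_CHECKED_AND_EMPTY:
        GPR_ASSERT(GRPC_LOG_IF_ERROR(
            "pollset_work", grpc_pollset_work(exec_ctx, pollset, &worker,
                                              polling_deadline)));
        break;
    }
    gpr_mu_unlock(mu);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(mu);
  }
  gpr_mu_unlock(mu);
}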
grpc_timeout_seconds_to_deadline(20); + grpc_millis deadline = + grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20)); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_INFO, "Start large read test, slice size %" PRIuPTR, slice_size); @@ -246,8 +247,7 @@ static void large_read_test(size_t slice_size) { grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline))); + grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); @@ -319,8 +319,8 @@ void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) { GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), - grpc_timeout_milliseconds_to_deadline(10)))); + grpc_timespec_to_millis_round_up( + grpc_timeout_milliseconds_to_deadline(10))))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); do { @@ -353,7 +353,8 @@ static void write_test(size_t num_bytes, size_t slice_size) { uint8_t current_data = 0; grpc_slice_buffer outgoing; grpc_closure write_done_closure; - gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20); + grpc_millis deadline = + grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20)); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_INFO, @@ -390,8 +391,7 @@ static void write_test(size_t num_bytes, size_t slice_size) { } GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline))); + grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); @@ -419,7 +419,8 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) { struct read_socket_state state; size_t written_bytes; int fd; - gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20); + grpc_millis deadline = + grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20)); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_closure fd_released_cb; int fd_released_done = 0; @@ -457,8 +458,7 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) { grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline))); + grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline))); gpr_log(GPR_DEBUG, "wakeup: read=%" PRIdPTR " target=%" PRIdPTR, state.read_bytes, state.target_read_bytes); gpr_mu_unlock(g_mu); @@ -476,8 +476,7 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) { grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline))); + grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline))); gpr_log(GPR_DEBUG, "wakeup: fd_released_done=%d", fd_released_done); } gpr_mu_unlock(g_mu); diff --git a/test/core/iomgr/tcp_server_posix_test.c b/test/core/iomgr/tcp_server_posix_test.c index 4d84608376..782dfb413a 100644 --- a/test/core/iomgr/tcp_server_posix_test.c +++ b/test/core/iomgr/tcp_server_posix_test.c @@ -230,7 +230,8 @@ static void test_no_op_with_port_and_start(void) { static grpc_error *tcp_connect(grpc_exec_ctx *exec_ctx, const test_addr *remote, on_connect_result *result) { - gpr_timespec deadline = 
grpc_timeout_seconds_to_deadline(10); + grpc_millis deadline = + grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10)); int clifd; int nconnects_before; const struct sockaddr *remote_addr = @@ -253,11 +254,10 @@ static grpc_error *tcp_connect(grpc_exec_ctx *exec_ctx, const test_addr *remote, } gpr_log(GPR_DEBUG, "wait"); while (g_nconnects == nconnects_before && - gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) { + deadline > grpc_exec_ctx_now(exec_ctx)) { grpc_pollset_worker *worker = NULL; grpc_error *err; - if ((err = grpc_pollset_work(exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline)) != + if ((err = grpc_pollset_work(exec_ctx, g_pollset, &worker, deadline)) != GRPC_ERROR_NONE) { gpr_mu_unlock(g_mu); close(clifd); diff --git a/test/core/iomgr/timer_list_test.c b/test/core/iomgr/timer_list_test.c index 5f8b01fdc4..c3d9f9d88d 100644 --- a/test/core/iomgr/timer_list_test.c +++ b/test/core/iomgr/timer_list_test.c @@ -41,51 +41,45 @@ static void cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { } static void add_test(void) { - gpr_timespec start = gpr_now(GPR_CLOCK_REALTIME); int i; grpc_timer timers[20]; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_INFO, "add_test"); - grpc_timer_list_init(start); + grpc_timer_list_init(&exec_ctx); grpc_timer_trace.value = 1; grpc_timer_check_trace.value = 1; memset(cb_called, 0, sizeof(cb_called)); + grpc_millis start = grpc_exec_ctx_now(&exec_ctx); + /* 10 ms timers. will expire in the current epoch */ for (i = 0; i < 10; i++) { - grpc_timer_init( - &exec_ctx, &timers[i], - gpr_time_add(start, gpr_time_from_millis(10, GPR_TIMESPAN)), - GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)i, grpc_schedule_on_exec_ctx), - start); + grpc_timer_init(&exec_ctx, &timers[i], start + 10, + GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)i, + grpc_schedule_on_exec_ctx)); } /* 1010 ms timers. will expire in the next epoch */ for (i = 10; i < 20; i++) { - grpc_timer_init( - &exec_ctx, &timers[i], - gpr_time_add(start, gpr_time_from_millis(1010, GPR_TIMESPAN)), - GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)i, grpc_schedule_on_exec_ctx), - start); + grpc_timer_init(&exec_ctx, &timers[i], start + 1010, + GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)i, + grpc_schedule_on_exec_ctx)); } /* collect timers. Only the first batch should be ready. 
*/ - GPR_ASSERT(grpc_timer_check( - &exec_ctx, - gpr_time_add(start, gpr_time_from_millis(500, GPR_TIMESPAN)), - NULL) == GRPC_TIMERS_FIRED); + exec_ctx.now = start + 500; + GPR_ASSERT(grpc_timer_check(&exec_ctx, NULL) == GRPC_TIMERS_FIRED); grpc_exec_ctx_finish(&exec_ctx); for (i = 0; i < 20; i++) { GPR_ASSERT(cb_called[i][1] == (i < 10)); GPR_ASSERT(cb_called[i][0] == 0); } - GPR_ASSERT(grpc_timer_check( - &exec_ctx, - gpr_time_add(start, gpr_time_from_millis(600, GPR_TIMESPAN)), - NULL) == GRPC_TIMERS_CHECKED_AND_EMPTY); + exec_ctx.now = start + 600; + GPR_ASSERT(grpc_timer_check(&exec_ctx, NULL) == + GRPC_TIMERS_CHECKED_AND_EMPTY); grpc_exec_ctx_finish(&exec_ctx); for (i = 0; i < 30; i++) { GPR_ASSERT(cb_called[i][1] == (i < 10)); @@ -93,20 +87,17 @@ static void add_test(void) { } /* collect the rest of the timers */ - GPR_ASSERT(grpc_timer_check( - &exec_ctx, - gpr_time_add(start, gpr_time_from_millis(1500, GPR_TIMESPAN)), - NULL) == GRPC_TIMERS_FIRED); + exec_ctx.now = start + 1500; + GPR_ASSERT(grpc_timer_check(&exec_ctx, NULL) == GRPC_TIMERS_FIRED); grpc_exec_ctx_finish(&exec_ctx); for (i = 0; i < 30; i++) { GPR_ASSERT(cb_called[i][1] == (i < 20)); GPR_ASSERT(cb_called[i][0] == 0); } - GPR_ASSERT(grpc_timer_check( - &exec_ctx, - gpr_time_add(start, gpr_time_from_millis(1600, GPR_TIMESPAN)), - NULL) == GRPC_TIMERS_CHECKED_AND_EMPTY); + exec_ctx.now = start + 1600; + GPR_ASSERT(grpc_timer_check(&exec_ctx, NULL) == + GRPC_TIMERS_CHECKED_AND_EMPTY); for (i = 0; i < 30; i++) { GPR_ASSERT(cb_called[i][1] == (i < 20)); GPR_ASSERT(cb_called[i][0] == 0); @@ -116,10 +107,6 @@ static void add_test(void) { grpc_exec_ctx_finish(&exec_ctx); } -static gpr_timespec tfm(int m) { - return gpr_time_from_millis(m, GPR_CLOCK_REALTIME); -} - /* Cleaning up a list with pending timers. 
*/ void destruction_test(void) { grpc_timer timers[5]; @@ -127,32 +114,30 @@ void destruction_test(void) { gpr_log(GPR_INFO, "destruction_test"); - grpc_timer_list_init(gpr_time_0(GPR_CLOCK_REALTIME)); + exec_ctx.now_is_valid = true; + exec_ctx.now = 0; + grpc_timer_list_init(&exec_ctx); grpc_timer_trace.value = 1; grpc_timer_check_trace.value = 1; memset(cb_called, 0, sizeof(cb_called)); grpc_timer_init( - &exec_ctx, &timers[0], tfm(100), - GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)0, grpc_schedule_on_exec_ctx), - gpr_time_0(GPR_CLOCK_REALTIME)); + &exec_ctx, &timers[0], 100, + GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)0, grpc_schedule_on_exec_ctx)); grpc_timer_init( - &exec_ctx, &timers[1], tfm(3), - GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)1, grpc_schedule_on_exec_ctx), - gpr_time_0(GPR_CLOCK_REALTIME)); + &exec_ctx, &timers[1], 3, + GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)1, grpc_schedule_on_exec_ctx)); grpc_timer_init( - &exec_ctx, &timers[2], tfm(100), - GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)2, grpc_schedule_on_exec_ctx), - gpr_time_0(GPR_CLOCK_REALTIME)); + &exec_ctx, &timers[2], 100, + GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)2, grpc_schedule_on_exec_ctx)); grpc_timer_init( - &exec_ctx, &timers[3], tfm(3), - GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)3, grpc_schedule_on_exec_ctx), - gpr_time_0(GPR_CLOCK_REALTIME)); + &exec_ctx, &timers[3], 3, + GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)3, grpc_schedule_on_exec_ctx)); grpc_timer_init( - &exec_ctx, &timers[4], tfm(1), - GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)4, grpc_schedule_on_exec_ctx), - gpr_time_0(GPR_CLOCK_REALTIME)); - GPR_ASSERT(grpc_timer_check(&exec_ctx, tfm(2), NULL) == GRPC_TIMERS_FIRED); + &exec_ctx, &timers[4], 1, + GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)4, grpc_schedule_on_exec_ctx)); + exec_ctx.now = 2; + GPR_ASSERT(grpc_timer_check(&exec_ctx, NULL) == GRPC_TIMERS_FIRED); grpc_exec_ctx_finish(&exec_ctx); GPR_ASSERT(1 == cb_called[4][1]); grpc_timer_cancel(&exec_ctx, &timers[0]); diff --git a/test/core/iomgr/udp_server_test.c b/test/core/iomgr/udp_server_test.c index 1d051bea62..2e44d0abc8 100644 --- a/test/core/iomgr/udp_server_test.c +++ b/test/core/iomgr/udp_server_test.c @@ -226,7 +226,7 @@ static void test_receive(int number_of_clients) { grpc_udp_server *s = grpc_udp_server_create(NULL); int i; int number_of_reads_before; - gpr_timespec deadline; + grpc_millis deadline; grpc_pollset *pollsets[1]; LOG_TEST("test_receive"); gpr_log(GPR_INFO, "clients=%d", number_of_clients); @@ -252,7 +252,8 @@ static void test_receive(int number_of_clients) { gpr_mu_lock(g_mu); for (i = 0; i < number_of_clients; i++) { - deadline = grpc_timeout_seconds_to_deadline(10); + deadline = + grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10)); number_of_reads_before = g_number_of_reads; /* Create a socket, send a packet to the UDP server. 
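/*
 * Illustrative sketch, not part of the patch: the fake-time idiom the timer
 * tests above use after the grpc_millis migration. Deadlines are absolute
 * millisecond values, and instead of passing a gpr_timespec "now" around, the
 * test seeds exec_ctx.now (with now_is_valid set) and advances it by hand.
 * The callback, the 100ms/150ms values and the helper name are hypothetical;
 * the API calls mirror timer_list_test.c above, whose includes are assumed.
 */
static int g_example_fired = 0;

static void example_timer_cb(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
  g_example_fired = (error == GRPC_ERROR_NONE);
}

static void example_fire_one_timer(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  exec_ctx.now_is_valid = true;
  exec_ctx.now = 0; /* fake clock starts at 0ms */
  grpc_timer_list_init(&exec_ctx);

  grpc_timer timer;
  grpc_timer_init(&exec_ctx, &timer, 100 /* absolute deadline in millis */,
                  GRPC_CLOSURE_CREATE(example_timer_cb, NULL,
                                      grpc_schedule_on_exec_ctx));

  exec_ctx.now = 50; /* not due yet: nothing fires */
  GPR_ASSERT(grpc_timer_check(&exec_ctx, NULL) ==
             GRPC_TIMERS_CHECKED_AND_EMPTY);

  exec_ctx.now = 150; /* past the deadline: the closure is scheduled */
  GPR_ASSERT(grpc_timer_check(&exec_ctx, NULL) == GRPC_TIMERS_FIRED);
  grpc_exec_ctx_finish(&exec_ctx); /* runs the scheduled closure */
  GPR_ASSERT(g_example_fired == 1);

  grpc_timer_list_shutdown(&exec_ctx);
  grpc_exec_ctx_finish(&exec_ctx);
}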
*/ @@ -262,14 +263,13 @@ static void test_receive(int number_of_clients) { (socklen_t)resolved_addr.len) == 0); GPR_ASSERT(5 == write(clifd, "hello", 5)); while (g_number_of_reads == number_of_reads_before && - gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) { + deadline > grpc_exec_ctx_now(&exec_ctx)) { grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline))); + grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline))); gpr_mu_unlock(g_mu); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_flush(&exec_ctx); gpr_mu_lock(g_mu); } GPR_ASSERT(g_number_of_reads == number_of_reads_before + 1); diff --git a/test/core/security/credentials_test.c b/test/core/security/credentials_test.c index 441c431135..fa8f1089c3 100644 --- a/test/core/security/credentials_test.c +++ b/test/core/security/credentials_test.c @@ -194,14 +194,13 @@ static void test_add_abunch_to_md_array(void) { static void test_oauth2_token_fetcher_creds_parsing_ok(void) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_mdelem token_md = GRPC_MDNULL; - gpr_timespec token_lifetime; + grpc_millis token_lifetime; grpc_httpcli_response response = http_response(200, valid_oauth2_json_response); GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response( &exec_ctx, &response, &token_md, &token_lifetime) == GRPC_CREDENTIALS_OK); - GPR_ASSERT(token_lifetime.tv_sec == 3599); - GPR_ASSERT(token_lifetime.tv_nsec == 0); + GPR_ASSERT(token_lifetime == 3599 * GPR_MS_PER_SEC); GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDKEY(token_md), "authorization") == 0); GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDVALUE(token_md), "Bearer ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_") == @@ -214,7 +213,7 @@ static void test_oauth2_token_fetcher_creds_parsing_ok(void) { static void test_oauth2_token_fetcher_creds_parsing_bad_http_status(void) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_mdelem token_md = GRPC_MDNULL; - gpr_timespec token_lifetime; + grpc_millis token_lifetime; grpc_httpcli_response response = http_response(401, valid_oauth2_json_response); GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response( @@ -227,7 +226,7 @@ static void test_oauth2_token_fetcher_creds_parsing_bad_http_status(void) { static void test_oauth2_token_fetcher_creds_parsing_empty_http_body(void) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_mdelem token_md = GRPC_MDNULL; - gpr_timespec token_lifetime; + grpc_millis token_lifetime; grpc_httpcli_response response = http_response(200, ""); GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response( &exec_ctx, &response, &token_md, &token_lifetime) == @@ -239,7 +238,7 @@ static void test_oauth2_token_fetcher_creds_parsing_empty_http_body(void) { static void test_oauth2_token_fetcher_creds_parsing_invalid_json(void) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_mdelem token_md = GRPC_MDNULL; - gpr_timespec token_lifetime; + grpc_millis token_lifetime; grpc_httpcli_response response = http_response(200, "{\"access_token\":\"ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_\"," @@ -255,7 +254,7 @@ static void test_oauth2_token_fetcher_creds_parsing_invalid_json(void) { static void test_oauth2_token_fetcher_creds_parsing_missing_token(void) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_mdelem token_md = GRPC_MDNULL; - gpr_timespec token_lifetime; + grpc_millis token_lifetime; grpc_httpcli_response response = http_response(200, "{" " \"expires_in\":3599, " @@ -270,7 +269,7 @@ 
static void test_oauth2_token_fetcher_creds_parsing_missing_token(void) { static void test_oauth2_token_fetcher_creds_parsing_missing_token_type(void) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_mdelem token_md = GRPC_MDNULL; - gpr_timespec token_lifetime; + grpc_millis token_lifetime; grpc_httpcli_response response = http_response(200, "{\"access_token\":\"ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_\"," @@ -287,7 +286,7 @@ static void test_oauth2_token_fetcher_creds_parsing_missing_token_lifetime( void) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_mdelem token_md = GRPC_MDNULL; - gpr_timespec token_lifetime; + grpc_millis token_lifetime; grpc_httpcli_response response = http_response(200, "{\"access_token\":\"ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_\"," @@ -555,7 +554,7 @@ static void validate_compute_engine_http_request( static int compute_engine_httpcli_get_success_override( grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - gpr_timespec deadline, grpc_closure *on_done, + grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { validate_compute_engine_http_request(request); *response = http_response(200, valid_oauth2_json_response); @@ -565,7 +564,7 @@ static int compute_engine_httpcli_get_success_override( static int compute_engine_httpcli_get_failure_override( grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - gpr_timespec deadline, grpc_closure *on_done, + grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { validate_compute_engine_http_request(request); *response = http_response(403, "Not Authorized."); @@ -575,7 +574,7 @@ static int compute_engine_httpcli_get_failure_override( static int httpcli_post_should_not_be_called( grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - const char *body_bytes, size_t body_size, gpr_timespec deadline, + const char *body_bytes, size_t body_size, grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { GPR_ASSERT("HTTP POST should not be called" == NULL); return 1; @@ -583,7 +582,7 @@ static int httpcli_post_should_not_be_called( static int httpcli_get_should_not_be_called(grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - gpr_timespec deadline, + grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { GPR_ASSERT("HTTP GET should not be called" == NULL); @@ -663,7 +662,7 @@ static void validate_refresh_token_http_request( static int refresh_token_httpcli_post_success( grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - const char *body, size_t body_size, gpr_timespec deadline, + const char *body, size_t body_size, grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { validate_refresh_token_http_request(request, body, body_size); *response = http_response(200, valid_oauth2_json_response); @@ -673,7 +672,7 @@ static int refresh_token_httpcli_post_success( static int refresh_token_httpcli_post_failure( grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - const char *body, size_t body_size, gpr_timespec deadline, + const char *body, size_t body_size, grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { validate_refresh_token_http_request(request, body, body_size); *response = http_response(403, "Not Authorized."); @@ -932,7 +931,7 @@ static void test_google_default_creds_refresh_token(void) { static int default_creds_gce_detection_httpcli_get_success_override( grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - 
gpr_timespec deadline, grpc_closure *on_done, + grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { *response = http_response(200, ""); grpc_http_header *headers = gpr_malloc(sizeof(*headers) * 1); @@ -996,7 +995,7 @@ static void test_google_default_creds_gce(void) { static int default_creds_gce_detection_httpcli_get_failure_override( grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - gpr_timespec deadline, grpc_closure *on_done, + grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { /* No magic header. */ GPR_ASSERT(strcmp(request->http.path, "/") == 0); diff --git a/test/core/security/jwt_verifier_test.c b/test/core/security/jwt_verifier_test.c index 9b17fb516d..a4bfe0130e 100644 --- a/test/core/security/jwt_verifier_test.c +++ b/test/core/security/jwt_verifier_test.c @@ -324,7 +324,7 @@ static grpc_httpcli_response http_response(int status, char *body) { static int httpcli_post_should_not_be_called( grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - const char *body_bytes, size_t body_size, gpr_timespec deadline, + const char *body_bytes, size_t body_size, grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { GPR_ASSERT("HTTP POST should not be called" == NULL); return 1; @@ -332,7 +332,7 @@ static int httpcli_post_should_not_be_called( static int httpcli_get_google_keys_for_email( grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - gpr_timespec deadline, grpc_closure *on_done, + grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { *response = http_response(200, good_google_email_keys()); GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl); @@ -379,7 +379,7 @@ static void test_jwt_verifier_google_email_issuer_success(void) { static int httpcli_get_custom_keys_for_email( grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - gpr_timespec deadline, grpc_closure *on_done, + grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { *response = http_response(200, gpr_strdup(good_jwk_set)); GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl); @@ -413,7 +413,7 @@ static void test_jwt_verifier_custom_email_issuer_success(void) { static int httpcli_get_jwk_set(grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - gpr_timespec deadline, grpc_closure *on_done, + grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { *response = http_response(200, gpr_strdup(good_jwk_set)); GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl); @@ -425,7 +425,7 @@ static int httpcli_get_jwk_set(grpc_exec_ctx *exec_ctx, static int httpcli_get_openid_config(grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - gpr_timespec deadline, + grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { *response = http_response(200, gpr_strdup(good_openid_config)); @@ -471,7 +471,7 @@ static void on_verification_key_retrieval_error(grpc_exec_ctx *exec_ctx, static int httpcli_get_bad_json(grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - gpr_timespec deadline, grpc_closure *on_done, + grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { *response = http_response(200, gpr_strdup("{\"bad\": \"stuff\"}")); GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl); @@ -581,7 +581,7 @@ static void test_jwt_verifier_bad_signature(void) { static int httpcli_get_should_not_be_called(grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - gpr_timespec 
deadline, + grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { GPR_ASSERT(0); diff --git a/test/core/security/oauth2_utils.c b/test/core/security/oauth2_utils.c index d240403a29..73d6c5bc7d 100644 --- a/test/core/security/oauth2_utils.c +++ b/test/core/security/oauth2_utils.c @@ -104,8 +104,7 @@ char *grpc_test_fetch_oauth2_token_with_credentials( "pollset_work", grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&request.pops), - &worker, gpr_now(GPR_CLOCK_MONOTONIC), - gpr_inf_future(GPR_CLOCK_MONOTONIC)))) { + &worker, GRPC_MILLIS_INF_FUTURE))) { request.is_done = true; } } diff --git a/test/core/security/print_google_default_creds_token.c b/test/core/security/print_google_default_creds_token.c index 3144717a85..29c38dfdf8 100644 --- a/test/core/security/print_google_default_creds_token.c +++ b/test/core/security/print_google_default_creds_token.c @@ -110,8 +110,7 @@ int main(int argc, char **argv) { "pollset_work", grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&sync.pops), &worker, - gpr_now(GPR_CLOCK_MONOTONIC), - gpr_inf_future(GPR_CLOCK_MONOTONIC)))) + GRPC_MILLIS_INF_FUTURE))) sync.is_done = true; gpr_mu_unlock(sync.mu); grpc_exec_ctx_flush(&exec_ctx); diff --git a/test/core/security/ssl_server_fuzzer.c b/test/core/security/ssl_server_fuzzer.c index 9858b11c7c..f9b754b8f2 100644 --- a/test/core/security/ssl_server_fuzzer.c +++ b/test/core/security/ssl_server_fuzzer.c @@ -84,8 +84,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { grpc_security_status status = grpc_server_credentials_create_security_connector(&exec_ctx, creds, &sc); GPR_ASSERT(status == GRPC_SECURITY_OK); - gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_seconds(1, GPR_TIMESPAN)); + grpc_millis deadline = GPR_MS_PER_SEC + grpc_exec_ctx_now(&exec_ctx); struct handshake_state state; state.done_callback_called = false; diff --git a/test/core/security/verify_jwt.c b/test/core/security/verify_jwt.c index 5faa6352a8..cec6fb94b4 100644 --- a/test/core/security/verify_jwt.c +++ b/test/core/security/verify_jwt.c @@ -102,11 +102,9 @@ int main(int argc, char **argv) { gpr_mu_lock(sync.mu); while (!sync.is_done) { grpc_pollset_worker *worker = NULL; - if (!GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(&exec_ctx, sync.pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), - gpr_inf_future(GPR_CLOCK_MONOTONIC)))) + if (!GRPC_LOG_IF_ERROR("pollset_work", + grpc_pollset_work(&exec_ctx, sync.pollset, &worker, + GRPC_MILLIS_INF_FUTURE))) sync.is_done = true; gpr_mu_unlock(sync.mu); grpc_exec_ctx_flush(&exec_ctx); diff --git a/test/core/support/BUILD b/test/core/support/BUILD index 096576e13c..407c3eff7b 100644 --- a/test/core/support/BUILD +++ b/test/core/support/BUILD @@ -39,16 +39,6 @@ grpc_cc_test( ) grpc_cc_test( - name = "backoff_test", - srcs = ["backoff_test.c"], - language = "C", - deps = [ - "//:gpr", - "//test/core/util:gpr_test_util", - ], -) - -grpc_cc_test( name = "cmdline_test", srcs = ["cmdline_test.c"], language = "C", diff --git a/test/core/support/backoff_test.c b/test/core/support/backoff_test.c deleted file mode 100644 index 23e3005af0..0000000000 --- a/test/core/support/backoff_test.c +++ /dev/null @@ -1,143 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/support/backoff.h" - -#include <grpc/support/log.h> - -#include "test/core/util/test_config.h" - -static void test_constant_backoff(void) { - gpr_backoff backoff; - gpr_backoff_init(&backoff, 200 /* initial timeout */, 1.0 /* multiplier */, - 0.0 /* jitter */, 100 /* min timeout */, - 1000 /* max timeout */); - - gpr_timespec now = gpr_time_0(GPR_TIMESPAN); - gpr_timespec next = gpr_backoff_begin(&backoff, now); - GPR_ASSERT(gpr_time_to_millis(gpr_time_sub(next, now)) == 200); - for (int i = 0; i < 10000; i++) { - next = gpr_backoff_step(&backoff, now); - GPR_ASSERT(gpr_time_to_millis(gpr_time_sub(next, now)) == 200); - now = next; - } -} - -static void test_min_connect(void) { - gpr_backoff backoff; - gpr_backoff_init(&backoff, 100 /* initial timeout */, 1.0 /* multiplier */, - 0.0 /* jitter */, 200 /* min timeout */, - 1000 /* max timeout */); - - gpr_timespec now = gpr_time_0(GPR_TIMESPAN); - gpr_timespec next = gpr_backoff_begin(&backoff, now); - GPR_ASSERT(gpr_time_to_millis(gpr_time_sub(next, now)) == 200); -} - -static void test_no_jitter_backoff(void) { - gpr_backoff backoff; - gpr_backoff_init(&backoff, 2 /* initial timeout */, 2.0 /* multiplier */, - 0.0 /* jitter */, 1 /* min timeout */, - 513 /* max timeout */); - // x_1 = 2 - // x_n = 2**i + x_{i-1} ( = 2**(n+1) - 2 ) - gpr_timespec now = gpr_time_0(GPR_TIMESPAN); - gpr_timespec next = gpr_backoff_begin(&backoff, now); - GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(2, GPR_TIMESPAN), next) == 0); - now = next; - next = gpr_backoff_step(&backoff, now); - GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(6, GPR_TIMESPAN), next) == 0); - now = next; - next = gpr_backoff_step(&backoff, now); - GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(14, GPR_TIMESPAN), next) == 0); - now = next; - next = gpr_backoff_step(&backoff, now); - GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(30, GPR_TIMESPAN), next) == 0); - now = next; - next = gpr_backoff_step(&backoff, now); - GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(62, GPR_TIMESPAN), next) == 0); - now = next; - next = gpr_backoff_step(&backoff, now); - GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(126, GPR_TIMESPAN), next) == 0); - now = next; - next = gpr_backoff_step(&backoff, now); - GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(254, GPR_TIMESPAN), next) == 0); - now = next; - next = gpr_backoff_step(&backoff, now); - GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(510, GPR_TIMESPAN), next) == 0); - now = next; - next = gpr_backoff_step(&backoff, now); - GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(1022, GPR_TIMESPAN), next) == 0); - now = next; - next = gpr_backoff_step(&backoff, now); - // Hit the maximum timeout. From this point onwards, retries will increase - // only by max timeout. 
- GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(1535, GPR_TIMESPAN), next) == 0); - now = next; - next = gpr_backoff_step(&backoff, now); - GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(2048, GPR_TIMESPAN), next) == 0); - now = next; - next = gpr_backoff_step(&backoff, now); - GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(2561, GPR_TIMESPAN), next) == 0); -} - -static void test_jitter_backoff(void) { - const int64_t initial_timeout = 500; - const double jitter = 0.1; - gpr_backoff backoff; - gpr_backoff_init(&backoff, initial_timeout, 1.0 /* multiplier */, jitter, - 100 /* min timeout */, 1000 /* max timeout */); - - backoff.rng_state = 0; // force consistent PRNG - - gpr_timespec now = gpr_time_0(GPR_TIMESPAN); - gpr_timespec next = gpr_backoff_begin(&backoff, now); - GPR_ASSERT(gpr_time_to_millis(gpr_time_sub(next, now)) == 500); - - int64_t expected_next_lower_bound = - (int64_t)((double)initial_timeout * (1 - jitter)); - int64_t expected_next_upper_bound = - (int64_t)((double)initial_timeout * (1 + jitter)); - - for (int i = 0; i < 10000; i++) { - next = gpr_backoff_step(&backoff, now); - - // next-now must be within (jitter*100)% of the previous timeout. - const int64_t timeout_millis = gpr_time_to_millis(gpr_time_sub(next, now)); - GPR_ASSERT(timeout_millis >= expected_next_lower_bound); - GPR_ASSERT(timeout_millis <= expected_next_upper_bound); - - expected_next_lower_bound = - (int64_t)((double)timeout_millis * (1 - jitter)); - expected_next_upper_bound = - (int64_t)((double)timeout_millis * (1 + jitter)); - now = next; - } -} - -int main(int argc, char **argv) { - grpc_test_init(argc, argv); - gpr_time_init(); - - test_constant_backoff(); - test_min_connect(); - test_no_jitter_backoff(); - test_jitter_backoff(); - - return 0; -} diff --git a/test/core/surface/concurrent_connectivity_test.c b/test/core/surface/concurrent_connectivity_test.c index ec2cd8610b..3595885ff6 100644 --- a/test/core/surface/concurrent_connectivity_test.c +++ b/test/core/surface/concurrent_connectivity_test.c @@ -135,14 +135,12 @@ void bad_server_thread(void *vargs) { gpr_mu_lock(args->mu); while (gpr_atm_acq_load(&args->stop) == 0) { - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec deadline = - gpr_time_add(now, gpr_time_from_millis(100, GPR_TIMESPAN)); + grpc_millis deadline = grpc_exec_ctx_now(&exec_ctx) + 100; grpc_pollset_worker *worker = NULL; - if (!GRPC_LOG_IF_ERROR("pollset_work", - grpc_pollset_work(&exec_ctx, args->pollset, &worker, - now, deadline))) { + if (!GRPC_LOG_IF_ERROR( + "pollset_work", + grpc_pollset_work(&exec_ctx, args->pollset, &worker, deadline))) { gpr_atm_rel_store(&args->stop, 1); } gpr_mu_unlock(args->mu); diff --git a/test/core/transport/bdp_estimator_test.c b/test/core/transport/bdp_estimator_test.c index dda48f45b1..a256eb3a4a 100644 --- a/test/core/transport/bdp_estimator_test.c +++ b/test/core/transport/bdp_estimator_test.c @@ -27,6 +27,18 @@ #include "src/core/lib/support/string.h" #include "test/core/util/test_config.h" +extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type); + +static int g_clock = 0; + +static gpr_timespec fake_gpr_now(gpr_clock_type clock_type) { + return (gpr_timespec){ + .tv_sec = g_clock, .tv_nsec = 0, .clock_type = clock_type, + }; +} + +static void inc_time(void) { g_clock += 30; } + static void test_noop(void) { gpr_log(GPR_INFO, "test_noop"); grpc_bdp_estimator est; @@ -44,6 +56,7 @@ static void test_get_estimate_no_samples(void) { static void add_samples(grpc_bdp_estimator *estimator, int64_t *samples, size_t n) { 
grpc_bdp_estimator_add_incoming_bytes(estimator, 1234567); + inc_time(); GPR_ASSERT(grpc_bdp_estimator_need_ping(estimator) == true); grpc_bdp_estimator_schedule_ping(estimator); grpc_bdp_estimator_start_ping(estimator); @@ -130,6 +143,7 @@ static void test_get_estimate_random_values(size_t n) { int main(int argc, char **argv) { grpc_test_init(argc, argv); + gpr_now_impl = fake_gpr_now; grpc_init(); test_noop(); test_get_estimate_no_samples(); diff --git a/test/core/transport/status_conversion_test.c b/test/core/transport/status_conversion_test.c index 89558964c1..de8fa4458a 100644 --- a/test/core/transport/status_conversion_test.c +++ b/test/core/transport/status_conversion_test.c @@ -22,8 +22,13 @@ #define GRPC_STATUS_TO_HTTP2_ERROR(a, b) \ GPR_ASSERT(grpc_status_to_http2_error(a) == (b)) -#define HTTP2_ERROR_TO_GRPC_STATUS(a, deadline, b) \ - GPR_ASSERT(grpc_http2_error_to_grpc_status(a, deadline) == (b)) +#define HTTP2_ERROR_TO_GRPC_STATUS(a, deadline, b) \ + do { \ + grpc_exec_ctx my_exec_ctx = GRPC_EXEC_CTX_INIT; \ + GPR_ASSERT(grpc_http2_error_to_grpc_status(&my_exec_ctx, a, deadline) == \ + (b)); \ + grpc_exec_ctx_finish(&my_exec_ctx); \ + } while (0) #define GRPC_STATUS_TO_HTTP2_STATUS(a, b) \ GPR_ASSERT(grpc_status_to_http2_status(a) == (b)) #define HTTP2_STATUS_TO_GRPC_STATUS(a, b) \ @@ -79,7 +84,7 @@ int main(int argc, char **argv) { GRPC_STATUS_TO_HTTP2_STATUS(GRPC_STATUS_UNAVAILABLE, 200); GRPC_STATUS_TO_HTTP2_STATUS(GRPC_STATUS_DATA_LOSS, 200); - const gpr_timespec before_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + const grpc_millis before_deadline = GRPC_MILLIS_INF_FUTURE; HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_NO_ERROR, before_deadline, GRPC_STATUS_INTERNAL); HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_PROTOCOL_ERROR, before_deadline, @@ -107,7 +112,7 @@ int main(int argc, char **argv) { HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_INADEQUATE_SECURITY, before_deadline, GRPC_STATUS_PERMISSION_DENIED); - const gpr_timespec after_deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC); + const grpc_millis after_deadline = 0; HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_NO_ERROR, after_deadline, GRPC_STATUS_INTERNAL); HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_PROTOCOL_ERROR, after_deadline, diff --git a/test/core/transport/timeout_encoding_test.c b/test/core/transport/timeout_encoding_test.c index 6388ffbcec..3010c6d057 100644 --- a/test/core/transport/timeout_encoding_test.c +++ b/test/core/transport/timeout_encoding_test.c @@ -25,12 +25,13 @@ #include <grpc/support/log.h> #include <grpc/support/string_util.h> #include <grpc/support/useful.h> +#include "src/core/lib/support/murmur_hash.h" #include "src/core/lib/support/string.h" #include "test/core/util/test_config.h" #define LOG_TEST(x) gpr_log(GPR_INFO, "%s", x) -static void assert_encodes_as(gpr_timespec ts, const char *s) { +static void assert_encodes_as(grpc_millis ts, const char *s) { char buffer[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE]; grpc_http2_encode_timeout(ts, buffer); gpr_log(GPR_INFO, "check '%s' == '%s'", buffer, s); @@ -39,47 +40,43 @@ static void assert_encodes_as(gpr_timespec ts, const char *s) { void test_encoding(void) { LOG_TEST("test_encoding"); - assert_encodes_as(gpr_time_from_micros(-1, GPR_TIMESPAN), "1n"); - assert_encodes_as(gpr_time_from_seconds(-10, GPR_TIMESPAN), "1n"); - assert_encodes_as(gpr_time_from_nanos(10, GPR_TIMESPAN), "10n"); - assert_encodes_as(gpr_time_from_nanos(999999999, GPR_TIMESPAN), "1S"); - assert_encodes_as(gpr_time_from_micros(1, GPR_TIMESPAN), "1u"); - assert_encodes_as(gpr_time_from_micros(10, 
GPR_TIMESPAN), "10u"); - assert_encodes_as(gpr_time_from_micros(100, GPR_TIMESPAN), "100u"); - assert_encodes_as(gpr_time_from_micros(890, GPR_TIMESPAN), "890u"); - assert_encodes_as(gpr_time_from_micros(900, GPR_TIMESPAN), "900u"); - assert_encodes_as(gpr_time_from_micros(901, GPR_TIMESPAN), "901u"); - assert_encodes_as(gpr_time_from_millis(1, GPR_TIMESPAN), "1m"); - assert_encodes_as(gpr_time_from_millis(2, GPR_TIMESPAN), "2m"); - assert_encodes_as(gpr_time_from_micros(10001, GPR_TIMESPAN), "10100u"); - assert_encodes_as(gpr_time_from_micros(999999, GPR_TIMESPAN), "1S"); - assert_encodes_as(gpr_time_from_millis(1000, GPR_TIMESPAN), "1S"); - assert_encodes_as(gpr_time_from_millis(2000, GPR_TIMESPAN), "2S"); - assert_encodes_as(gpr_time_from_millis(2500, GPR_TIMESPAN), "2500m"); - assert_encodes_as(gpr_time_from_millis(59900, GPR_TIMESPAN), "59900m"); - assert_encodes_as(gpr_time_from_seconds(50, GPR_TIMESPAN), "50S"); - assert_encodes_as(gpr_time_from_seconds(59, GPR_TIMESPAN), "59S"); - assert_encodes_as(gpr_time_from_seconds(60, GPR_TIMESPAN), "1M"); - assert_encodes_as(gpr_time_from_seconds(80, GPR_TIMESPAN), "80S"); - assert_encodes_as(gpr_time_from_seconds(90, GPR_TIMESPAN), "90S"); - assert_encodes_as(gpr_time_from_minutes(2, GPR_TIMESPAN), "2M"); - assert_encodes_as(gpr_time_from_minutes(20, GPR_TIMESPAN), "20M"); - assert_encodes_as(gpr_time_from_hours(1, GPR_TIMESPAN), "1H"); - assert_encodes_as(gpr_time_from_hours(10, GPR_TIMESPAN), "10H"); - assert_encodes_as(gpr_time_from_seconds(1000000000, GPR_TIMESPAN), - "1000000000S"); + assert_encodes_as(-1, "1n"); + assert_encodes_as(-10, "1n"); + assert_encodes_as(1, "1m"); + assert_encodes_as(10, "10m"); + assert_encodes_as(100, "100m"); + assert_encodes_as(890, "890m"); + assert_encodes_as(900, "900m"); + assert_encodes_as(901, "901m"); + assert_encodes_as(1000, "1S"); + assert_encodes_as(2000, "2S"); + assert_encodes_as(2500, "2500m"); + assert_encodes_as(59900, "59900m"); + assert_encodes_as(50000, "50S"); + assert_encodes_as(59000, "59S"); + assert_encodes_as(60000, "1M"); + assert_encodes_as(80000, "80S"); + assert_encodes_as(90000, "90S"); + assert_encodes_as(120000, "2M"); + assert_encodes_as(20 * 60 * GPR_MS_PER_SEC, "20M"); + assert_encodes_as(60 * 60 * GPR_MS_PER_SEC, "1H"); + assert_encodes_as(10 * 60 * 60 * GPR_MS_PER_SEC, "10H"); } -static void assert_decodes_as(const char *buffer, gpr_timespec expected) { - gpr_timespec got; - gpr_log(GPR_INFO, "check decoding '%s'", buffer); +static void assert_decodes_as(const char *buffer, grpc_millis expected) { + grpc_millis got; + uint32_t hash = gpr_murmur_hash3(buffer, strlen(buffer), 0); + gpr_log(GPR_INFO, "check decoding '%s' (hash=0x%x)", buffer, hash); GPR_ASSERT(1 == grpc_http2_decode_timeout( grpc_slice_from_static_string(buffer), &got)); - GPR_ASSERT(0 == gpr_time_cmp(got, expected)); + if (got != expected) { + gpr_log(GPR_ERROR, "got:'%" PRIdPTR "' != expected:'%" PRIdPTR "'", got, + expected); + abort(); + } } -void decode_suite(char ext, - gpr_timespec (*answer)(int64_t x, gpr_clock_type clock)) { +void decode_suite(char ext, grpc_millis (*answer)(int64_t x)) { long test_vals[] = {1, 12, 123, 1234, 12345, 123456, 1234567, 12345678, 123456789, 98765432, 9876543, 987654, 98765, 9876, 987, 98, 9}; @@ -87,41 +84,55 @@ void decode_suite(char ext, char *input; for (i = 0; i < GPR_ARRAY_SIZE(test_vals); i++) { gpr_asprintf(&input, "%ld%c", test_vals[i], ext); - assert_decodes_as(input, answer(test_vals[i], GPR_TIMESPAN)); + assert_decodes_as(input, answer(test_vals[i])); 
gpr_free(input); gpr_asprintf(&input, " %ld%c", test_vals[i], ext); - assert_decodes_as(input, answer(test_vals[i], GPR_TIMESPAN)); + assert_decodes_as(input, answer(test_vals[i])); gpr_free(input); gpr_asprintf(&input, "%ld %c", test_vals[i], ext); - assert_decodes_as(input, answer(test_vals[i], GPR_TIMESPAN)); + assert_decodes_as(input, answer(test_vals[i])); gpr_free(input); gpr_asprintf(&input, "%ld %c ", test_vals[i], ext); - assert_decodes_as(input, answer(test_vals[i], GPR_TIMESPAN)); + assert_decodes_as(input, answer(test_vals[i])); gpr_free(input); } } +static grpc_millis millis_from_nanos(int64_t x) { + return x / GPR_NS_PER_MS + (x % GPR_NS_PER_MS != 0); +} +static grpc_millis millis_from_micros(int64_t x) { + return x / GPR_US_PER_MS + (x % GPR_US_PER_MS != 0); +} +static grpc_millis millis_from_millis(int64_t x) { return x; } +static grpc_millis millis_from_seconds(int64_t x) { return x * GPR_MS_PER_SEC; } +static grpc_millis millis_from_minutes(int64_t x) { + return x * 60 * GPR_MS_PER_SEC; +} +static grpc_millis millis_from_hours(int64_t x) { + return x * 3600 * GPR_MS_PER_SEC; +} + void test_decoding(void) { LOG_TEST("test_decoding"); - decode_suite('n', gpr_time_from_nanos); - decode_suite('u', gpr_time_from_micros); - decode_suite('m', gpr_time_from_millis); - decode_suite('S', gpr_time_from_seconds); - decode_suite('M', gpr_time_from_minutes); - decode_suite('H', gpr_time_from_hours); - assert_decodes_as("1000000000S", - gpr_time_from_seconds(1000 * 1000 * 1000, GPR_TIMESPAN)); - assert_decodes_as("1000000000000000000000u", gpr_inf_future(GPR_TIMESPAN)); - assert_decodes_as("1000000001S", gpr_inf_future(GPR_TIMESPAN)); - assert_decodes_as("2000000001S", gpr_inf_future(GPR_TIMESPAN)); - assert_decodes_as("9999999999S", gpr_inf_future(GPR_TIMESPAN)); + decode_suite('n', millis_from_nanos); + decode_suite('u', millis_from_micros); + decode_suite('m', millis_from_millis); + decode_suite('S', millis_from_seconds); + decode_suite('M', millis_from_minutes); + decode_suite('H', millis_from_hours); + assert_decodes_as("1000000000S", millis_from_seconds(1000 * 1000 * 1000)); + assert_decodes_as("1000000000000000000000u", GRPC_MILLIS_INF_FUTURE); + assert_decodes_as("1000000001S", GRPC_MILLIS_INF_FUTURE); + assert_decodes_as("2000000001S", GRPC_MILLIS_INF_FUTURE); + assert_decodes_as("9999999999S", GRPC_MILLIS_INF_FUTURE); } static void assert_decoding_fails(const char *s) { - gpr_timespec x; + grpc_millis x; GPR_ASSERT(0 == grpc_http2_decode_timeout(grpc_slice_from_static_string(s), &x)); } diff --git a/test/core/util/port_server_client.c b/test/core/util/port_server_client.c index ba4028dbee..7b94ac4ada 100644 --- a/test/core/util/port_server_client.c +++ b/test/core/util/port_server_client.c @@ -88,7 +88,7 @@ void grpc_free_port_using_server(int port) { grpc_resource_quota *resource_quota = grpc_resource_quota_create("port_server_client/free"); grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req, - grpc_timeout_seconds_to_deadline(30), + grpc_exec_ctx_now(&exec_ctx) + 30 * GPR_MS_PER_SEC, GRPC_CLOSURE_CREATE(freed_port_from_server, &pr, grpc_schedule_on_exec_ctx), &rsp); @@ -100,8 +100,8 @@ void grpc_free_port_using_server(int port) { if (!GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&pr.pops), - &worker, gpr_now(GPR_CLOCK_MONOTONIC), - grpc_timeout_seconds_to_deadline(1)))) { + &worker, + grpc_exec_ctx_now(&exec_ctx) + GPR_MS_PER_SEC))) { pr.done = 1; } } @@ -173,7 +173,7 @@ static void 
got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg, grpc_resource_quota *resource_quota = grpc_resource_quota_create("port_server_client/pick_retry"); grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pops, resource_quota, &req, - grpc_timeout_seconds_to_deadline(10), + grpc_exec_ctx_now(exec_ctx) + 30 * GPR_MS_PER_SEC, GRPC_CLOSURE_CREATE(got_port_from_server, pr, grpc_schedule_on_exec_ctx), &pr->response); @@ -224,7 +224,7 @@ int grpc_pick_port_using_server(void) { grpc_resource_quota_create("port_server_client/pick"); grpc_httpcli_get( &exec_ctx, &context, &pr.pops, resource_quota, &req, - grpc_timeout_seconds_to_deadline(30), + grpc_exec_ctx_now(&exec_ctx) + 30 * GPR_MS_PER_SEC, GRPC_CLOSURE_CREATE(got_port_from_server, &pr, grpc_schedule_on_exec_ctx), &pr.response); grpc_resource_quota_unref_internal(&exec_ctx, resource_quota); @@ -235,8 +235,8 @@ int grpc_pick_port_using_server(void) { if (!GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&pr.pops), - &worker, gpr_now(GPR_CLOCK_MONOTONIC), - grpc_timeout_seconds_to_deadline(1)))) { + &worker, + grpc_exec_ctx_now(&exec_ctx) + GPR_MS_PER_SEC))) { pr.port = 0; } } diff --git a/test/core/util/test_tcp_server.c b/test/core/util/test_tcp_server.c index d3a1de8a3b..611ecb330c 100644 --- a/test/core/util/test_tcp_server.c +++ b/test/core/util/test_tcp_server.c @@ -31,6 +31,7 @@ #include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/iomgr/tcp_server.h" #include "test/core/util/port.h" +#include "test/core/util/test_config.h" static void on_server_destroyed(grpc_exec_ctx *exec_ctx, void *data, grpc_error *error) { @@ -78,14 +79,13 @@ void test_tcp_server_start(test_tcp_server *server, int port) { void test_tcp_server_poll(test_tcp_server *server, int seconds) { grpc_pollset_worker *worker = NULL; - gpr_timespec deadline = - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_seconds(seconds, GPR_TIMESPAN)); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_millis deadline = grpc_timespec_to_millis_round_up( + grpc_timeout_seconds_to_deadline(seconds)); gpr_mu_lock(server->mu); - GRPC_LOG_IF_ERROR("pollset_work", - grpc_pollset_work(&exec_ctx, server->pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline)); + GRPC_LOG_IF_ERROR( + "pollset_work", + grpc_pollset_work(&exec_ctx, server->pollset, &worker, deadline)); gpr_mu_unlock(server->mu); grpc_exec_ctx_finish(&exec_ctx); } diff --git a/test/cpp/common/alarm_cpp_test.cc b/test/cpp/common/alarm_cpp_test.cc index 212972d25d..7adc3102f4 100644 --- a/test/cpp/common/alarm_cpp_test.cc +++ b/test/cpp/common/alarm_cpp_test.cc @@ -142,7 +142,7 @@ TEST(AlarmTest, ZeroExpiry) { void* output_tag; bool ok; const CompletionQueue::NextStatus status = cq.AsyncNext( - (void**)&output_tag, &ok, grpc_timeout_seconds_to_deadline(0)); + (void**)&output_tag, &ok, grpc_timeout_seconds_to_deadline(1)); EXPECT_EQ(status, CompletionQueue::GOT_EVENT); EXPECT_TRUE(ok); @@ -158,7 +158,7 @@ TEST(AlarmTest, NegativeExpiry) { void* output_tag; bool ok; const CompletionQueue::NextStatus status = cq.AsyncNext( - (void**)&output_tag, &ok, grpc_timeout_seconds_to_deadline(0)); + (void**)&output_tag, &ok, grpc_timeout_seconds_to_deadline(1)); EXPECT_EQ(status, CompletionQueue::GOT_EVENT); EXPECT_TRUE(ok); diff --git a/test/cpp/end2end/generic_end2end_test.cc b/test/cpp/end2end/generic_end2end_test.cc index b9e6e18ca7..6e95a55283 100644 --- a/test/cpp/end2end/generic_end2end_test.cc +++ b/test/cpp/end2end/generic_end2end_test.cc @@ -145,7 +145,7 @@ class 
GenericEnd2endTest : public ::testing::Test { if (check_deadline) { EXPECT_TRUE(gpr_time_similar(deadline, srv_ctx.raw_deadline(), - gpr_time_from_millis(100, GPR_TIMESPAN))); + gpr_time_from_millis(1000, GPR_TIMESPAN))); } ByteBuffer recv_buffer; diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc index cadc9b2a11..cf9a42e8c6 100644 --- a/test/cpp/microbenchmarks/bm_call_create.cc +++ b/test/cpp/microbenchmarks/bm_call_create.cc @@ -554,7 +554,7 @@ static void BM_IsolatedFilter(benchmark::State &state) { grpc_exec_ctx_flush(&exec_ctx); grpc_call_stack *call_stack = static_cast<grpc_call_stack *>( gpr_zalloc(channel_stack->call_stack_size)); - gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + grpc_millis deadline = GRPC_MILLIS_INF_FUTURE; gpr_timespec start_time = gpr_now(GPR_CLOCK_MONOTONIC); grpc_slice method = grpc_slice_from_static_string("/foo/bar"); grpc_call_final_info final_info; diff --git a/test/cpp/microbenchmarks/bm_chttp2_transport.cc b/test/cpp/microbenchmarks/bm_chttp2_transport.cc index 070034fe33..6f9dee7822 100644 --- a/test/cpp/microbenchmarks/bm_chttp2_transport.cc +++ b/test/cpp/microbenchmarks/bm_chttp2_transport.cc @@ -321,7 +321,7 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) { grpc_metadata_batch b; grpc_metadata_batch_init(&b); - b.deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + b.deadline = GRPC_MILLIS_INF_FUTURE; std::vector<grpc_mdelem> elems = Metadata::GetElems(f.exec_ctx()); std::vector<grpc_linked_mdelem> storage(elems.size()); for (size_t i = 0; i < elems.size(); i++) { @@ -410,7 +410,7 @@ static void BM_TransportStreamSend(benchmark::State &state) { grpc_metadata_batch b; grpc_metadata_batch_init(&b); - b.deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + b.deadline = GRPC_MILLIS_INF_FUTURE; std::vector<grpc_mdelem> elems = RepresentativeClientInitialMetadata::GetElems(f.exec_ctx()); std::vector<grpc_linked_mdelem> storage(elems.size()); @@ -542,7 +542,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) { grpc_metadata_batch_init(&b); grpc_metadata_batch b_recv; grpc_metadata_batch_init(&b_recv); - b.deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + b.deadline = GRPC_MILLIS_INF_FUTURE; std::vector<grpc_mdelem> elems = RepresentativeClientInitialMetadata::GetElems(f.exec_ctx()); std::vector<grpc_linked_mdelem> storage(elems.size()); diff --git a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc index 5c9405f583..57a69acf01 100644 --- a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc +++ b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc @@ -73,9 +73,9 @@ static void cq_done_cb(grpc_exec_ctx* exec_ctx, void* done_arg, /* Queues a completion tag if deadline is > 0. 
* Does nothing if deadline is 0 (i.e gpr_time_0(GPR_CLOCK_MONOTONIC)) */ static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* ps, - grpc_pollset_worker** worker, gpr_timespec now, - gpr_timespec deadline) { - if (gpr_time_cmp(deadline, gpr_time_0(GPR_CLOCK_MONOTONIC)) == 0) { + grpc_pollset_worker** worker, + grpc_millis deadline) { + if (deadline == 0) { gpr_log(GPR_DEBUG, "no-op"); return GRPC_ERROR_NONE; } diff --git a/test/cpp/microbenchmarks/bm_error.cc b/test/cpp/microbenchmarks/bm_error.cc index bd5f02e172..56b80dfcf6 100644 --- a/test/cpp/microbenchmarks/bm_error.cc +++ b/test/cpp/microbenchmarks/bm_error.cc @@ -159,39 +159,39 @@ BENCHMARK(BM_ErrorGetPresentInt); // Fixtures for tests: generate different kinds of errors class ErrorNone { public: - gpr_timespec deadline() const { return deadline_; } + grpc_millis deadline() const { return deadline_; } grpc_error* error() const { return GRPC_ERROR_NONE; } private: - const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC); + const grpc_millis deadline_ = GRPC_MILLIS_INF_FUTURE; }; class ErrorCancelled { public: - gpr_timespec deadline() const { return deadline_; } + grpc_millis deadline() const { return deadline_; } grpc_error* error() const { return GRPC_ERROR_CANCELLED; } private: - const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC); + const grpc_millis deadline_ = GRPC_MILLIS_INF_FUTURE; }; class SimpleError { public: - gpr_timespec deadline() const { return deadline_; } + grpc_millis deadline() const { return deadline_; } grpc_error* error() const { return error_.get(); } private: - const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC); + const grpc_millis deadline_ = GRPC_MILLIS_INF_FUTURE; ErrorPtr error_{GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error")}; }; class ErrorWithGrpcStatus { public: - gpr_timespec deadline() const { return deadline_; } + grpc_millis deadline() const { return deadline_; } grpc_error* error() const { return error_.get(); } private: - const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC); + const grpc_millis deadline_ = GRPC_MILLIS_INF_FUTURE; ErrorPtr error_{grpc_error_set_int( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error"), GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNIMPLEMENTED)}; @@ -199,11 +199,11 @@ class ErrorWithGrpcStatus { class ErrorWithHttpError { public: - gpr_timespec deadline() const { return deadline_; } + grpc_millis deadline() const { return deadline_; } grpc_error* error() const { return error_.get(); } private: - const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC); + const grpc_millis deadline_ = GRPC_MILLIS_INF_FUTURE; ErrorPtr error_{grpc_error_set_int( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error"), GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_COMPRESSION_ERROR)}; @@ -211,11 +211,11 @@ class ErrorWithHttpError { class ErrorWithNestedGrpcStatus { public: - gpr_timespec deadline() const { return deadline_; } + grpc_millis deadline() const { return deadline_; } grpc_error* error() const { return error_.get(); } private: - const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC); + const grpc_millis deadline_ = GRPC_MILLIS_INF_FUTURE; ErrorPtr nested_error_{grpc_error_set_int( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error"), GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNIMPLEMENTED)}; @@ -248,12 +248,14 @@ template <class Fixture> static void BM_ErrorGetStatus(benchmark::State& state) { TrackCounters track_counters; Fixture fixture; + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; while (state.KeepRunning()) { 
grpc_status_code status; grpc_slice slice; - grpc_error_get_status(fixture.error(), fixture.deadline(), &status, &slice, - NULL); + grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(), + &status, &slice, NULL); } + grpc_exec_ctx_finish(&exec_ctx); track_counters.Finish(state); } @@ -261,11 +263,13 @@ template <class Fixture> static void BM_ErrorGetStatusCode(benchmark::State& state) { TrackCounters track_counters; Fixture fixture; + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; while (state.KeepRunning()) { grpc_status_code status; - grpc_error_get_status(fixture.error(), fixture.deadline(), &status, NULL, - NULL); + grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(), + &status, NULL, NULL); } + grpc_exec_ctx_finish(&exec_ctx); track_counters.Finish(state); } @@ -273,11 +277,13 @@ template <class Fixture> static void BM_ErrorHttpError(benchmark::State& state) { TrackCounters track_counters; Fixture fixture; + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; while (state.KeepRunning()) { grpc_http2_error_code error; - grpc_error_get_status(fixture.error(), fixture.deadline(), NULL, NULL, - &error); + grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(), NULL, + NULL, &error); } + grpc_exec_ctx_finish(&exec_ctx); track_counters.Finish(state); } diff --git a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc index 2656566a50..b240e96fca 100644 --- a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc +++ b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc @@ -29,6 +29,7 @@ extern "C" { #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/ext/transport/chttp2/transport/internal.h" +#include "src/core/lib/iomgr/timer_manager.h" #include "test/core/util/trickle_endpoint.h" } @@ -45,6 +46,22 @@ DEFINE_int32(warmup_max_time_seconds, 10, namespace grpc { namespace testing { +gpr_atm g_now_us = 0; + +static gpr_timespec fake_now(gpr_clock_type clock_type) { + gpr_timespec t; + gpr_atm now = gpr_atm_no_barrier_load(&g_now_us); + t.tv_sec = now / GPR_US_PER_SEC; + t.tv_nsec = (now % GPR_US_PER_SEC) * GPR_NS_PER_US; + t.clock_type = clock_type; + return t; +} + +static void inc_time() { + gpr_atm_no_barrier_fetch_add(&g_now_us, 100); + grpc_timer_manager_tick(); +} + static void* tag(intptr_t x) { return reinterpret_cast<void*>(x); } template <class A0> @@ -158,6 +175,7 @@ class TrickledCHTTP2 : public EndpointPairFixture { void Step(bool update_stats) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + inc_time(); size_t client_backlog = grpc_trickle_endpoint_trickle(&exec_ctx, endpoint_pair_.client); size_t server_backlog = @@ -213,8 +231,7 @@ static void TrickleCQNext(TrickledCHTTP2* fixture, void** t, bool* ok, while (true) { fixture->Log(iteration); switch (fixture->cq()->AsyncNext( - t, ok, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_micros(100, GPR_TIMESPAN)))) { + t, ok, gpr_inf_past(GPR_CLOCK_MONOTONIC))) { case CompletionQueue::TIMEOUT: fixture->Step(iteration != -1); break; @@ -289,9 +306,15 @@ static void BM_PumpStreamServerToClient_Trickle(benchmark::State& state) { inner_loop(false); } response_rw.Finish(Status::OK, tag(1)); - need_tags = (1 << 0) | (1 << 1); + grpc::Status status; + request_rw->Finish(&status, tag(2)); + need_tags = (1 << 0) | (1 << 1) | (1 << 2); while (need_tags) { TrickleCQNext(fixture.get(), &t, &ok, -1); + if (t == tag(0) && ok) { + request_rw->Read(&recv_response, tag(0)); + continue; + } int i = (int)(intptr_t)t; 
GPR_ASSERT(need_tags & (1 << i)); need_tags &= ~(1 << i); @@ -419,8 +442,12 @@ BENCHMARK(BM_PumpUnbalancedUnary_Trickle)->Apply(UnaryTrickleArgs); } } +extern "C" gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type); + int main(int argc, char** argv) { ::benchmark::Initialize(&argc, argv); ::grpc::testing::InitTest(&argc, &argv, false); + grpc_timer_manager_set_threading(false); + gpr_now_impl = ::grpc::testing::fake_now; ::benchmark::RunSpecifiedBenchmarks(); } diff --git a/test/cpp/microbenchmarks/bm_pollset.cc b/test/cpp/microbenchmarks/bm_pollset.cc index 1fc1f2f83b..eab1e89480 100644 --- a/test/cpp/microbenchmarks/bm_pollset.cc +++ b/test/cpp/microbenchmarks/bm_pollset.cc @@ -117,11 +117,9 @@ static void BM_PollEmptyPollset(benchmark::State& state) { gpr_mu* mu; grpc_pollset_init(ps, &mu); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - gpr_timespec now = gpr_time_0(GPR_CLOCK_MONOTONIC); - gpr_timespec deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC); gpr_mu_lock(mu); while (state.KeepRunning()) { - GRPC_ERROR_UNREF(grpc_pollset_work(&exec_ctx, ps, NULL, now, deadline)); + GRPC_ERROR_UNREF(grpc_pollset_work(&exec_ctx, ps, NULL, 0)); } grpc_closure shutdown_ps_closure; GRPC_CLOSURE_INIT(&shutdown_ps_closure, shutdown_ps, ps, @@ -223,8 +221,6 @@ static void BM_SingleThreadPollOneFd(benchmark::State& state) { gpr_mu* mu; grpc_pollset_init(ps, &mu); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - gpr_timespec now = gpr_time_0(GPR_CLOCK_MONOTONIC); - gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); grpc_wakeup_fd wakeup_fd; GRPC_ERROR_UNREF(grpc_wakeup_fd_init(&wakeup_fd)); grpc_fd* wakeup = grpc_fd_create(wakeup_fd.read_fd, "wakeup_read"); @@ -245,7 +241,8 @@ static void BM_SingleThreadPollOneFd(benchmark::State& state) { grpc_fd_notify_on_read(&exec_ctx, wakeup, continue_closure); gpr_mu_lock(mu); while (!done) { - GRPC_ERROR_UNREF(grpc_pollset_work(&exec_ctx, ps, NULL, now, deadline)); + GRPC_ERROR_UNREF( + grpc_pollset_work(&exec_ctx, ps, NULL, GRPC_MILLIS_INF_FUTURE)); } grpc_fd_orphan(&exec_ctx, wakeup, NULL, NULL, false /* already_closed */, "done"); diff --git a/test/cpp/microbenchmarks/fullstack_fixtures.h b/test/cpp/microbenchmarks/fullstack_fixtures.h index ecd28c3f8a..a7f8504505 100644 --- a/test/cpp/microbenchmarks/fullstack_fixtures.h +++ b/test/cpp/microbenchmarks/fullstack_fixtures.h @@ -85,7 +85,7 @@ class FullstackFixture : public BaseFixture { } virtual ~FullstackFixture() { - server_->Shutdown(); + server_->Shutdown(gpr_inf_past(GPR_CLOCK_MONOTONIC)); cq_->Shutdown(); void* tag; bool ok; @@ -212,7 +212,7 @@ class EndpointPairFixture : public BaseFixture { } virtual ~EndpointPairFixture() { - server_->Shutdown(); + server_->Shutdown(gpr_inf_past(GPR_CLOCK_MONOTONIC)); cq_->Shutdown(); void* tag; bool ok; diff --git a/test/cpp/naming/resolver_component_test.cc b/test/cpp/naming/resolver_component_test.cc index 396f4ceb9e..0851fd6137 100644 --- a/test/cpp/naming/resolver_component_test.cc +++ b/test/cpp/naming/resolver_component_test.cc @@ -199,10 +199,10 @@ void PollPollsetUntilRequestDone(ArgsStruct *args) { grpc_pollset_worker *worker = NULL; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_mu_lock(args->mu); - GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(&exec_ctx, args->pollset, &worker, - gpr_now(GPR_CLOCK_REALTIME), NSecondDeadline(1))); + GRPC_LOG_IF_ERROR("pollset_work", + grpc_pollset_work(&exec_ctx, args->pollset, &worker, + grpc_timespec_to_millis_round_up( + NSecondDeadline(1)))); gpr_mu_unlock(args->mu); grpc_exec_ctx_finish(&exec_ctx); } 
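
The test hunks above all follow the same conversion: the old grpc_pollset_work(exec_ctx, pollset, worker, now, deadline) call with a pair of gpr_timespec arguments becomes a single grpc_millis deadline read off the exec_ctx clock. Below is a minimal sketch of that pattern, not part of the patch, using only calls that appear in this diff (grpc_exec_ctx_now, grpc_timespec_to_millis_round_up, grpc_timeout_seconds_to_deadline, GRPC_LOG_IF_ERROR); the helper name poll_once_with_millis_deadline and the variables ps and mu are placeholders.

/* Sketch only: illustrates the gpr_timespec -> grpc_millis polling
 * conversion applied throughout the tests above. The pollset `ps`, its
 * mutex `mu`, and the helper name are placeholders, not patch content. */
#include <grpc/support/sync.h>
#include <grpc/support/time.h>

#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/pollset.h"
#include "test/core/util/test_config.h"

static void poll_once_with_millis_deadline(grpc_pollset *ps, gpr_mu *mu,
                                           int seconds) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  /* Before: a gpr_timespec deadline built from gpr_now(GPR_CLOCK_MONOTONIC),
   * with `now` passed alongside it. After: one grpc_millis offset taken
   * from the exec_ctx clock. */
  grpc_millis deadline =
      grpc_exec_ctx_now(&exec_ctx) + seconds * GPR_MS_PER_SEC;
  /* Equivalent, when starting from an absolute gpr_timespec:
   *   grpc_millis deadline = grpc_timespec_to_millis_round_up(
   *       grpc_timeout_seconds_to_deadline(seconds)); */
  grpc_pollset_worker *worker = NULL;
  gpr_mu_lock(mu);
  GRPC_LOG_IF_ERROR("pollset_work",
                    grpc_pollset_work(&exec_ctx, ps, &worker, deadline));
  gpr_mu_unlock(mu);
  grpc_exec_ctx_finish(&exec_ctx);
}

With grpc_millis there is no separate `now` argument: a single millisecond value relative to the exec_ctx clock, or GRPC_MILLIS_INF_FUTURE when no deadline is wanted, is all grpc_pollset_work takes in the new signature.
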
diff --git a/tools/debug/core/chttp2_ref_leak.py b/tools/debug/core/chttp2_ref_leak.py new file mode 100755 index 0000000000..d693dd9e54 --- /dev/null +++ b/tools/debug/core/chttp2_ref_leak.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python2.7 +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Reads stdin to find chttp2_refcount log lines, and prints reference leaks +# to stdout + +import collections +import sys +import re + +def new_obj(): + return ['destroy'] + +outstanding = collections.defaultdict(new_obj) + +# Sample log line: +# chttp2:unref:0x629000005200 2->1 destroy [src/core/ext/transport/chttp2/transport/chttp2_transport.c:599] + +for line in sys.stdin: + m = re.search(r'chttp2:( ref|unref):0x([a-fA-F0-9]+) [^ ]+ ([^[]+) \[(.*)\]', line) + if m: + if m.group(1) == ' ref': + outstanding[m.group(2)].append(m.group(3)) + else: + outstanding[m.group(2)].remove(m.group(3)) + +for obj, remaining in outstanding.items(): + if remaining: + print 'LEAKED: %s %r' % (obj, remaining) + diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index 49919415bd..02b0ce1638 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -931,6 +931,7 @@ include/grpc/support/tls_pthread.h \ include/grpc/support/useful.h \ include/grpc/support/workaround_list.h \ src/core/ext/transport/inproc/inproc_transport.h \ +src/core/lib/backoff/backoff.h \ src/core/lib/channel/channel_args.h \ src/core/lib/channel/channel_stack.h \ src/core/lib/channel/channel_stack_builder.h \ @@ -1024,7 +1025,6 @@ src/core/lib/support/arena.h \ src/core/lib/support/atomic.h \ src/core/lib/support/atomic_with_atm.h \ src/core/lib/support/atomic_with_std.h \ -src/core/lib/support/backoff.h \ src/core/lib/support/block_annotate.h \ src/core/lib/support/env.h \ src/core/lib/support/memory.h \ diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index e352cb78f1..a941552471 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -1055,6 +1055,8 @@ src/core/ext/transport/inproc/inproc_plugin.c \ src/core/ext/transport/inproc/inproc_transport.c \ src/core/ext/transport/inproc/inproc_transport.h \ src/core/lib/README.md \ +src/core/lib/backoff/backoff.c \ +src/core/lib/backoff/backoff.h \ src/core/lib/channel/README.md \ src/core/lib/channel/channel_args.c \ src/core/lib/channel/channel_args.h \ @@ -1296,8 +1298,6 @@ src/core/lib/support/atomic.h \ src/core/lib/support/atomic_with_atm.h \ src/core/lib/support/atomic_with_std.h \ src/core/lib/support/avl.c \ -src/core/lib/support/backoff.c \ -src/core/lib/support/backoff.h \ src/core/lib/support/block_annotate.h \ src/core/lib/support/cmdline.c \ src/core/lib/support/cpu_iphone.c \ diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 8d42c3e432..1374cddf89 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ 
b/tools/run_tests/generated/sources_and_headers.json @@ -104,6 +104,23 @@ "gpr", "gpr_test_util", "grpc", + "grpc_test_util" + ], + "headers": [], + "is_filegroup": false, + "language": "c", + "name": "backoff_test", + "src": [ + "test/core/backoff/backoff_test.c" + ], + "third_party": false, + "type": "target" + }, + { + "deps": [ + "gpr", + "gpr_test_util", + "grpc", "grpc_test_util", "test_tcp_server" ], @@ -734,21 +751,6 @@ "headers": [], "is_filegroup": false, "language": "c", - "name": "gpr_backoff_test", - "src": [ - "test/core/support/backoff_test.c" - ], - "third_party": false, - "type": "target" - }, - { - "deps": [ - "gpr", - "gpr_test_util" - ], - "headers": [], - "is_filegroup": false, - "language": "c", "name": "gpr_cmdline_test", "src": [ "test/core/support/cmdline_test.c" @@ -7773,7 +7775,6 @@ "src/core/lib/support/arena.c", "src/core/lib/support/atm.c", "src/core/lib/support/avl.c", - "src/core/lib/support/backoff.c", "src/core/lib/support/cmdline.c", "src/core/lib/support/cpu_iphone.c", "src/core/lib/support/cpu_linux.c", @@ -7854,7 +7855,6 @@ "src/core/lib/support/atomic.h", "src/core/lib/support/atomic_with_atm.h", "src/core/lib/support/atomic_with_std.h", - "src/core/lib/support/backoff.h", "src/core/lib/support/block_annotate.h", "src/core/lib/support/env.h", "src/core/lib/support/memory.h", @@ -7903,7 +7903,6 @@ "src/core/lib/support/atomic.h", "src/core/lib/support/atomic_with_atm.h", "src/core/lib/support/atomic_with_std.h", - "src/core/lib/support/backoff.h", "src/core/lib/support/block_annotate.h", "src/core/lib/support/env.h", "src/core/lib/support/memory.h", @@ -7997,6 +7996,7 @@ "language": "c", "name": "grpc_base", "src": [ + "src/core/lib/backoff/backoff.c", "src/core/lib/channel/channel_args.c", "src/core/lib/channel/channel_stack.c", "src/core/lib/channel/channel_stack_builder.c", @@ -8148,6 +8148,7 @@ "include/grpc/slice_buffer.h", "include/grpc/status.h", "include/grpc/support/workaround_list.h", + "src/core/lib/backoff/backoff.h", "src/core/lib/channel/channel_args.h", "src/core/lib/channel/channel_stack.h", "src/core/lib/channel/channel_stack_builder.h", @@ -8279,6 +8280,7 @@ "include/grpc/slice_buffer.h", "include/grpc/status.h", "include/grpc/support/workaround_list.h", + "src/core/lib/backoff/backoff.h", "src/core/lib/channel/channel_args.h", "src/core/lib/channel/channel_stack.h", "src/core/lib/channel/channel_stack_builder.h", diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index 72dfbc0b98..ff1d47cf89 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -121,6 +121,28 @@ ], "cpu_cost": 1.0, "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "gtest": false, + "language": "c", + "name": "backoff_test", + "platforms": [ + "linux", + "mac", + "posix", + "windows" + ] + }, + { + "args": [], + "ci_platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "cpu_cost": 1.0, + "exclude_configs": [], "exclude_iomgrs": [ "uv" ], @@ -805,28 +827,6 @@ "flaky": false, "gtest": false, "language": "c", - "name": "gpr_backoff_test", - "platforms": [ - "linux", - "mac", - "posix", - "windows" - ] - }, - { - "args": [], - "ci_platforms": [ - "linux", - "mac", - "posix", - "windows" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "gtest": false, - "language": "c", "name": "gpr_cmdline_test", "platforms": [ "linux", diff --git a/tools/run_tests/performance/massage_qps_stats.py 
b/tools/run_tests/performance/massage_qps_stats.py index ef4f90cd93..3bdace1ac3 100644 --- a/tools/run_tests/performance/massage_qps_stats.py +++ b/tools/run_tests/performance/massage_qps_stats.py @@ -74,6 +74,7 @@ def massage_qps_stats(scenario_result): stats["core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_transport_flow_control_unstalled") stats["core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_ping_response") stats["core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_force_rst_stream") + stats["core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(core_stats, "http2_spurious_writes_begun") stats["core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_initiated") stats["core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_scheduled_items") stats["core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_scheduled_final_items") diff --git a/tools/run_tests/performance/scenario_result_schema.json b/tools/run_tests/performance/scenario_result_schema.json index e8fae6989e..583bce1fff 100644 --- a/tools/run_tests/performance/scenario_result_schema.json +++ b/tools/run_tests/performance/scenario_result_schema.json @@ -382,6 +382,11 @@ }, { "mode": "NULLABLE", + "name": "core_http2_spurious_writes_begun", + "type": "INTEGER" + }, + { + "mode": "NULLABLE", "name": "core_combiner_locks_initiated", "type": "INTEGER" }, @@ -1099,6 +1104,11 @@ }, { "mode": "NULLABLE", + "name": "core_http2_spurious_writes_begun", + "type": "INTEGER" + }, + { + "mode": "NULLABLE", "name": "core_combiner_locks_initiated", "type": "INTEGER" }, diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index b38108d456..f661aed4e2 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -1479,8 +1479,11 @@ class BuildAndRunError(object): def _has_epollexclusive(): + binary = 'bins/%s/check_epollexclusive' % args.config + if not os.path.exists(binary): + return False try: - subprocess.check_call('bins/%s/check_epollexclusive' % args.config) + subprocess.check_call(binary) return True except subprocess.CalledProcessError, e: return False |
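
The bdp_estimator_test.c and bm_fullstack_trickle.cc hunks earlier in this patch both cope with the new grpc_millis clock by swapping in a fake gpr_now(). A condensed sketch of that override follows, assuming only what those hunks show (the extern gpr_now_impl hook and a manually advanced counter); the names g_fake_clock_sec, fake_gpr_now, and advance_fake_clock are illustrative, not from the patch.

/* Condensed sketch of the fake-clock override used by the tests in this
 * patch; only the extern hook and the manual counter come from the diff,
 * the names here are illustrative. */
#include <grpc/support/time.h>

extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);

static int64_t g_fake_clock_sec = 0;

static gpr_timespec fake_gpr_now(gpr_clock_type clock_type) {
  gpr_timespec t;
  t.tv_sec = g_fake_clock_sec;
  t.tv_nsec = 0;
  t.clock_type = clock_type;
  return t;
}

static void advance_fake_clock(int64_t seconds) {
  g_fake_clock_sec += seconds;
}

/* Installed before grpc_init(), as bdp_estimator_test.c does:
 *   gpr_now_impl = fake_gpr_now;
 *   grpc_init();
 * after which each advance_fake_clock(30) moves every subsequent clock
 * reading, and hence every grpc_millis deadline computation, forward by
 * 30 seconds. */
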