author     Craig Tiller <ctiller@google.com>   2017-10-06 11:56:39 -0700
committer  GitHub <noreply@github.com>         2017-10-06 11:56:39 -0700
commit     3640b4084ac8485ef99ab3162cae7b8b079bbf6b (patch)
tree       8c949d86089640bd8449a1c011eec9b0a176f673
parent     1b11c745417dc54ccc31bba4cdfb2adb02c09ab9 (diff)
parent     d48bd078d7f257db2d4c48e8e835bb2ff1ac7e73 (diff)
Merge pull request #12677 from ctiller/flowctl+millis
Roll-up: Flow control changes, and internal timing changes
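
The hunks below migrate deadlines and timers from gpr_timespec to the new
grpc_millis representation. A minimal sketch of that pattern, using only names
that appear in this diff (grpc_millis, grpc_exec_ctx_now, grpc_timer_init); the
function, timer, closure, and timeout_ms parameter are hypothetical:

static void schedule_hypothetical_timeout(grpc_exec_ctx *exec_ctx,
                                          grpc_timer *timer,
                                          grpc_closure *on_timeout,
                                          int timeout_ms) {
  /* Previously a gpr_timespec deadline was computed and the current time was
     passed explicitly:
       gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
       grpc_timer_init(exec_ctx, timer,
                       gpr_time_add(now, gpr_time_from_millis(timeout_ms,
                                                              GPR_TIMESPAN)),
                       on_timeout, now);
     The updated API takes an absolute grpc_millis deadline derived from the
     exec_ctx clock and drops the trailing `now` argument: */
  grpc_millis deadline = grpc_exec_ctx_now(exec_ctx) + timeout_ms;
  grpc_timer_init(exec_ctx, timer, deadline, on_timeout);
}
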
-rw-r--r--  BUILD | 6
-rw-r--r--  CMakeLists.txt | 65
-rw-r--r--  Makefile | 79
-rw-r--r--  binding.gyp | 2
-rw-r--r--  build.yaml | 24
-rw-r--r--  config.m4 | 3
-rw-r--r--  config.w32 | 3
-rw-r--r--  gRPC-Core.podspec | 10
-rw-r--r--  grpc.gemspec | 6
-rw-r--r--  grpc.gyp | 5
-rw-r--r--  include/grpc/impl/codegen/atm_gcc_atomic.h | 1
-rw-r--r--  include/grpc/impl/codegen/atm_gcc_sync.h | 1
-rw-r--r--  include/grpc/impl/codegen/atm_windows.h | 1
-rw-r--r--  package.xml | 6
-rw-r--r--  src/core/ext/filters/client_channel/channel_connectivity.cc | 4
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.cc | 34
-rw-r--r--  src/core/ext/filters/client_channel/connector.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc | 73
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc | 11
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h | 3
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc | 30
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc | 34
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.cc | 25
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.h | 2
-rw-r--r--  src/core/ext/filters/deadline/deadline_filter.cc | 17
-rw-r--r--  src/core/ext/filters/deadline/deadline_filter.h | 5
-rw-r--r--  src/core/ext/filters/max_age/max_age_filter.cc | 75
-rw-r--r--  src/core/ext/transport/chttp2/server/chttp2_server.cc | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.cc | 259
-rw-r--r--  src/core/ext/transport/chttp2/transport/flow_control.cc | 88
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_ping.cc | 13
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_encoder.cc | 10
-rw-r--r--  src/core/ext/transport/chttp2/transport/incoming_metadata.cc | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/incoming_metadata.h | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h | 36
-rw-r--r--  src/core/ext/transport/chttp2/transport/parsing.cc | 26
-rw-r--r--  src/core/ext/transport/chttp2/transport/writing.cc | 120
-rw-r--r--  src/core/ext/transport/inproc/inproc_transport.cc | 16
-rw-r--r--  src/core/lib/backoff/backoff.cc (renamed from src/core/lib/support/backoff.cc) | 33
-rw-r--r--  src/core/lib/backoff/backoff.h (renamed from src/core/lib/support/backoff.h) | 34
-rw-r--r--  src/core/lib/channel/channel_stack.h | 2
-rw-r--r--  src/core/lib/channel/handshaker.cc | 6
-rw-r--r--  src/core/lib/channel/handshaker.h | 2
-rw-r--r--  src/core/lib/debug/stats_data.cc | 2
-rw-r--r--  src/core/lib/debug/stats_data.h | 4
-rw-r--r--  src/core/lib/debug/stats_data.yaml | 3
-rw-r--r--  src/core/lib/debug/stats_data_bq_schema.sql | 1
-rw-r--r--  src/core/lib/http/httpcli.cc | 13
-rw-r--r--  src/core/lib/http/httpcli.h | 12
-rw-r--r--  src/core/lib/http/httpcli_security_connector.cc | 2
-rw-r--r--  src/core/lib/iomgr/block_annotate.h (renamed from src/core/lib/support/block_annotate.h) | 14
-rw-r--r--  src/core/lib/iomgr/ev_epoll1_linux.cc | 54
-rw-r--r--  src/core/lib/iomgr/ev_epollex_linux.cc | 80
-rw-r--r--  src/core/lib/iomgr/ev_epollsig_linux.cc | 41
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.cc | 43
-rw-r--r--  src/core/lib/iomgr/ev_posix.cc | 6
-rw-r--r--  src/core/lib/iomgr/ev_posix.h | 4
-rw-r--r--  src/core/lib/iomgr/exec_ctx.cc | 62
-rw-r--r--  src/core/lib/iomgr/exec_ctx.h | 18
-rw-r--r--  src/core/lib/iomgr/executor.cc | 1
-rw-r--r--  src/core/lib/iomgr/iocp_windows.cc | 29
-rw-r--r--  src/core/lib/iomgr/iocp_windows.h | 2
-rw-r--r--  src/core/lib/iomgr/iomgr.cc | 7
-rw-r--r--  src/core/lib/iomgr/load_file.cc | 4
-rw-r--r--  src/core/lib/iomgr/pollset.h | 4
-rw-r--r--  src/core/lib/iomgr/pollset_uv.cc | 7
-rw-r--r--  src/core/lib/iomgr/pollset_windows.cc | 5
-rw-r--r--  src/core/lib/iomgr/resolve_address_posix.cc | 6
-rw-r--r--  src/core/lib/iomgr/resolve_address_windows.cc | 4
-rw-r--r--  src/core/lib/iomgr/resource_quota.cc | 34
-rw-r--r--  src/core/lib/iomgr/tcp_client.h | 2
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.cc | 11
-rw-r--r--  src/core/lib/iomgr/tcp_client_uv.cc | 10
-rw-r--r--  src/core/lib/iomgr/tcp_client_windows.cc | 10
-rw-r--r--  src/core/lib/iomgr/tcp_posix.cc | 10
-rw-r--r--  src/core/lib/iomgr/timer.h | 7
-rw-r--r--  src/core/lib/iomgr/timer_generic.cc | 124
-rw-r--r--  src/core/lib/iomgr/timer_manager.cc | 49
-rw-r--r--  src/core/lib/iomgr/timer_uv.cc | 9
-rw-r--r--  src/core/lib/security/credentials/google_default/google_default_credentials.cc | 7
-rw-r--r--  src/core/lib/security/credentials/jwt/jwt_verifier.cc | 12
-rw-r--r--  src/core/lib/security/credentials/jwt/jwt_verifier.h | 2
-rw-r--r--  src/core/lib/security/credentials/oauth2/oauth2_credentials.cc | 37
-rw-r--r--  src/core/lib/security/credentials/oauth2/oauth2_credentials.h | 6
-rw-r--r--  src/core/lib/support/time_posix.cc | 3
-rw-r--r--  src/core/lib/support/time_windows.cc | 3
-rw-r--r--  src/core/lib/surface/alarm.cc | 3
-rw-r--r--  src/core/lib/surface/call.cc | 59
-rw-r--r--  src/core/lib/surface/call.h | 2
-rw-r--r--  src/core/lib/surface/channel.cc | 9
-rw-r--r--  src/core/lib/surface/channel.h | 2
-rw-r--r--  src/core/lib/surface/completion_queue.cc | 50
-rw-r--r--  src/core/lib/surface/lame_client.cc | 2
-rw-r--r--  src/core/lib/surface/server.cc | 16
-rw-r--r--  src/core/lib/transport/bdp_estimator.cc | 36
-rw-r--r--  src/core/lib/transport/bdp_estimator.h | 12
-rw-r--r--  src/core/lib/transport/error_utils.cc | 9
-rw-r--r--  src/core/lib/transport/error_utils.h | 6
-rw-r--r--  src/core/lib/transport/metadata_batch.cc | 6
-rw-r--r--  src/core/lib/transport/metadata_batch.h | 4
-rw-r--r--  src/core/lib/transport/status_conversion.cc | 7
-rw-r--r--  src/core/lib/transport/status_conversion.h | 6
-rw-r--r--  src/core/lib/transport/timeout_encoding.cc | 75
-rw-r--r--  src/core/lib/transport/timeout_encoding.h | 5
-rw-r--r--  src/core/lib/transport/transport_op_string.cc | 5
-rw-r--r--  src/python/grpcio/grpc_core_dependencies.py | 2
-rw-r--r--  test/core/backoff/BUILD | 36
-rw-r--r--  test/core/backoff/backoff_test.c | 149
-rw-r--r--  test/core/bad_client/bad_client.c | 15
-rw-r--r--  test/core/bad_client/tests/window_overflow.c | 2
-rw-r--r--  test/core/channel/channel_stack_test.c | 2
-rw-r--r--  test/core/client_channel/resolvers/dns_resolver_connectivity_test.c | 2
-rw-r--r--  test/core/end2end/fixtures/h2_ssl_cert.c | 2
-rw-r--r--  test/core/end2end/fixtures/http_proxy_fixture.c | 10
-rw-r--r--  test/core/end2end/fuzzers/api_fuzzer.c | 22
-rw-r--r--  test/core/end2end/invalid_call_argument_test.c | 2
-rw-r--r--  test/core/end2end/tests/bad_ping.c | 17
-rw-r--r--  test/core/end2end/tests/keepalive_timeout.c | 26
-rw-r--r--  test/core/http/httpcli_test.c | 11
-rw-r--r--  test/core/http/httpscli_test.c | 11
-rw-r--r--  test/core/iomgr/endpoint_tests.c | 16
-rw-r--r--  test/core/iomgr/ev_epollsig_linux_test.c | 6
-rw-r--r--  test/core/iomgr/fd_posix_test.c | 24
-rw-r--r--  test/core/iomgr/pollset_set_test.c | 24
-rw-r--r--  test/core/iomgr/resolve_address_posix_test.c | 28
-rw-r--r--  test/core/iomgr/resolve_address_test.c | 28
-rw-r--r--  test/core/iomgr/tcp_client_posix_test.c | 21
-rw-r--r--  test/core/iomgr/tcp_posix_test.c | 31
-rw-r--r--  test/core/iomgr/tcp_server_posix_test.c | 8
-rw-r--r--  test/core/iomgr/timer_list_test.c | 83
-rw-r--r--  test/core/iomgr/udp_server_test.c | 12
-rw-r--r--  test/core/security/credentials_test.c | 33
-rw-r--r--  test/core/security/jwt_verifier_test.c | 14
-rw-r--r--  test/core/security/oauth2_utils.c | 3
-rw-r--r--  test/core/security/print_google_default_creds_token.c | 3
-rw-r--r--  test/core/security/ssl_server_fuzzer.c | 3
-rw-r--r--  test/core/security/verify_jwt.c | 8
-rw-r--r--  test/core/support/BUILD | 10
-rw-r--r--  test/core/support/backoff_test.c | 143
-rw-r--r--  test/core/surface/concurrent_connectivity_test.c | 10
-rw-r--r--  test/core/transport/bdp_estimator_test.c | 24
-rw-r--r--  test/core/transport/status_conversion_test.c | 13
-rw-r--r--  test/core/transport/timeout_encoding_test.c | 117
-rw-r--r--  test/core/util/port_server_client.c | 14
-rw-r--r--  test/core/util/test_tcp_server.c | 12
-rw-r--r--  test/cpp/common/alarm_cpp_test.cc | 4
-rw-r--r--  test/cpp/end2end/async_end2end_test.cc | 109
-rw-r--r--  test/cpp/end2end/generic_end2end_test.cc | 2
-rw-r--r--  test/cpp/microbenchmarks/bm_call_create.cc | 2
-rw-r--r--  test/cpp/microbenchmarks/bm_chttp2_transport.cc | 6
-rw-r--r--  test/cpp/microbenchmarks/bm_cq_multiple_threads.cc | 6
-rw-r--r--  test/cpp/microbenchmarks/bm_error.cc | 42
-rw-r--r--  test/cpp/microbenchmarks/bm_fullstack_trickle.cc | 35
-rw-r--r--  test/cpp/microbenchmarks/bm_pollset.cc | 9
-rw-r--r--  test/cpp/microbenchmarks/fullstack_fixtures.h | 4
-rw-r--r--  test/cpp/naming/resolver_component_test.cc | 8
-rwxr-xr-x  tools/debug/core/chttp2_ref_leak.py | 42
-rw-r--r--  tools/doxygen/Doxyfile.c++.internal | 4
-rw-r--r--  tools/doxygen/Doxyfile.core.internal | 6
-rw-r--r--  tools/run_tests/generated/sources_and_headers.json | 42
-rw-r--r--  tools/run_tests/generated/tests.json | 44
-rw-r--r--  tools/run_tests/performance/massage_qps_stats.py | 1
-rw-r--r--  tools/run_tests/performance/scenario_result_schema.json | 10
-rwxr-xr-x  tools/run_tests/run_tests.py | 5
164 files changed, 1877 insertions, 1761 deletions
diff --git a/BUILD b/BUILD
index d0b37dfa6c..8ccc748039 100644
--- a/BUILD
+++ b/BUILD
@@ -467,7 +467,6 @@ grpc_cc_library(
"src/core/lib/support/arena.cc",
"src/core/lib/support/atm.cc",
"src/core/lib/support/avl.cc",
- "src/core/lib/support/backoff.cc",
"src/core/lib/support/cmdline.cc",
"src/core/lib/support/cpu_iphone.cc",
"src/core/lib/support/cpu_linux.cc",
@@ -514,8 +513,6 @@ grpc_cc_library(
"src/core/lib/support/atomic.h",
"src/core/lib/support/atomic_with_atm.h",
"src/core/lib/support/atomic_with_std.h",
- "src/core/lib/support/backoff.h",
- "src/core/lib/support/block_annotate.h",
"src/core/lib/support/env.h",
"src/core/lib/support/memory.h",
"src/core/lib/support/mpscq.h",
@@ -568,6 +565,7 @@ grpc_cc_library(
grpc_cc_library(
name = "grpc_base_c",
srcs = [
+ "src/core/lib/backoff/backoff.cc",
"src/core/lib/channel/channel_args.cc",
"src/core/lib/channel/channel_stack.cc",
"src/core/lib/channel/channel_stack_builder.cc",
@@ -762,6 +760,7 @@ grpc_cc_library(
"src/core/lib/iomgr/socket_utils_posix.h",
"src/core/lib/iomgr/socket_windows.h",
"src/core/lib/iomgr/sys_epoll_wrapper.h",
+ "src/core/lib/iomgr/block_annotate.h",
"src/core/lib/iomgr/tcp_client.h",
"src/core/lib/iomgr/tcp_client_posix.h",
"src/core/lib/iomgr/tcp_posix.h",
@@ -817,6 +816,7 @@ grpc_cc_library(
"src/core/lib/transport/timeout_encoding.h",
"src/core/lib/transport/transport.h",
"src/core/lib/transport/transport_impl.h",
+ "src/core/lib/backoff/backoff.h",
],
external_deps = [
"zlib",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 759d49bd98..139d1bd46c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -379,6 +379,7 @@ add_dependencies(buildtests_c algorithm_test)
add_dependencies(buildtests_c alloc_test)
add_dependencies(buildtests_c alpn_test)
add_dependencies(buildtests_c arena_test)
+add_dependencies(buildtests_c backoff_test)
add_dependencies(buildtests_c bad_server_response_test)
add_dependencies(buildtests_c bdp_estimator_test)
add_dependencies(buildtests_c bin_decoder_test)
@@ -428,7 +429,6 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_c goaway_server_test)
endif()
add_dependencies(buildtests_c gpr_avl_test)
-add_dependencies(buildtests_c gpr_backoff_test)
add_dependencies(buildtests_c gpr_cmdline_test)
add_dependencies(buildtests_c gpr_cpu_test)
add_dependencies(buildtests_c gpr_env_test)
@@ -786,7 +786,6 @@ add_library(gpr
src/core/lib/support/arena.cc
src/core/lib/support/atm.cc
src/core/lib/support/avl.cc
- src/core/lib/support/backoff.cc
src/core/lib/support/cmdline.cc
src/core/lib/support/cpu_iphone.cc
src/core/lib/support/cpu_linux.cc
@@ -955,6 +954,7 @@ endif (gRPC_BUILD_TESTS)
add_library(grpc
src/core/lib/surface/init.cc
+ src/core/lib/backoff/backoff.cc
src/core/lib/channel/channel_args.cc
src/core/lib/channel/channel_stack.cc
src/core/lib/channel/channel_stack_builder.cc
@@ -1306,6 +1306,7 @@ endif()
add_library(grpc_cronet
src/core/lib/surface/init.cc
+ src/core/lib/backoff/backoff.cc
src/core/lib/channel/channel_args.cc
src/core/lib/channel/channel_stack.cc
src/core/lib/channel/channel_stack_builder.cc
@@ -1625,6 +1626,7 @@ add_library(grpc_test_util
test/core/util/port_server_client.c
test/core/util/slice_splitter.c
test/core/util/trickle_endpoint.c
+ src/core/lib/backoff/backoff.cc
src/core/lib/channel/channel_args.cc
src/core/lib/channel/channel_stack.cc
src/core/lib/channel/channel_stack_builder.cc
@@ -1888,6 +1890,7 @@ add_library(grpc_test_util_unsecure
test/core/util/port_server_client.c
test/core/util/slice_splitter.c
test/core/util/trickle_endpoint.c
+ src/core/lib/backoff/backoff.cc
src/core/lib/channel/channel_args.cc
src/core/lib/channel/channel_stack.cc
src/core/lib/channel/channel_stack_builder.cc
@@ -2137,6 +2140,7 @@ endif (gRPC_BUILD_TESTS)
add_library(grpc_unsecure
src/core/lib/surface/init.cc
src/core/lib/surface/init_unsecure.cc
+ src/core/lib/backoff/backoff.cc
src/core/lib/channel/channel_args.cc
src/core/lib/channel/channel_stack.cc
src/core/lib/channel/channel_stack_builder.cc
@@ -2894,6 +2898,7 @@ add_library(grpc++_cronet
src/core/ext/transport/chttp2/transport/stream_map.cc
src/core/ext/transport/chttp2/transport/varint.cc
src/core/ext/transport/chttp2/transport/writing.cc
+ src/core/lib/backoff/backoff.cc
src/core/lib/channel/channel_args.cc
src/core/lib/channel/channel_stack.cc
src/core/lib/channel/channel_stack_builder.cc
@@ -5195,6 +5200,35 @@ target_link_libraries(arena_test
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
+add_executable(backoff_test
+ test/core/backoff/backoff_test.c
+)
+
+
+target_include_directories(backoff_test
+ PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+ PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+ PRIVATE ${BORINGSSL_ROOT_DIR}/include
+ PRIVATE ${PROTOBUF_ROOT_DIR}/src
+ PRIVATE ${BENCHMARK_ROOT_DIR}/include
+ PRIVATE ${ZLIB_ROOT_DIR}
+ PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+ PRIVATE ${CARES_INCLUDE_DIR}
+ PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+ PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+)
+
+target_link_libraries(backoff_test
+ ${_gRPC_ALLTARGETS_LIBRARIES}
+ grpc_test_util
+ grpc
+ gpr_test_util
+ gpr
+)
+
+endif (gRPC_BUILD_TESTS)
+if (gRPC_BUILD_TESTS)
+
add_executable(bad_server_response_test
test/core/end2end/bad_server_response_test.c
)
@@ -6295,33 +6329,6 @@ target_link_libraries(gpr_avl_test
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
-add_executable(gpr_backoff_test
- test/core/support/backoff_test.c
-)
-
-
-target_include_directories(gpr_backoff_test
- PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
- PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
- PRIVATE ${BORINGSSL_ROOT_DIR}/include
- PRIVATE ${PROTOBUF_ROOT_DIR}/src
- PRIVATE ${BENCHMARK_ROOT_DIR}/include
- PRIVATE ${ZLIB_ROOT_DIR}
- PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
- PRIVATE ${CARES_INCLUDE_DIR}
- PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
- PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
-)
-
-target_link_libraries(gpr_backoff_test
- ${_gRPC_ALLTARGETS_LIBRARIES}
- gpr_test_util
- gpr
-)
-
-endif (gRPC_BUILD_TESTS)
-if (gRPC_BUILD_TESTS)
-
add_executable(gpr_cmdline_test
test/core/support/cmdline_test.c
)
diff --git a/Makefile b/Makefile
index 5ea638a9e7..382956dc44 100644
--- a/Makefile
+++ b/Makefile
@@ -950,6 +950,7 @@ alloc_test: $(BINDIR)/$(CONFIG)/alloc_test
alpn_test: $(BINDIR)/$(CONFIG)/alpn_test
api_fuzzer: $(BINDIR)/$(CONFIG)/api_fuzzer
arena_test: $(BINDIR)/$(CONFIG)/arena_test
+backoff_test: $(BINDIR)/$(CONFIG)/backoff_test
bad_server_response_test: $(BINDIR)/$(CONFIG)/bad_server_response_test
bdp_estimator_test: $(BINDIR)/$(CONFIG)/bdp_estimator_test
bin_decoder_test: $(BINDIR)/$(CONFIG)/bin_decoder_test
@@ -988,7 +989,6 @@ gen_legal_metadata_characters: $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters
gen_percent_encoding_tables: $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables
goaway_server_test: $(BINDIR)/$(CONFIG)/goaway_server_test
gpr_avl_test: $(BINDIR)/$(CONFIG)/gpr_avl_test
-gpr_backoff_test: $(BINDIR)/$(CONFIG)/gpr_backoff_test
gpr_cmdline_test: $(BINDIR)/$(CONFIG)/gpr_cmdline_test
gpr_cpu_test: $(BINDIR)/$(CONFIG)/gpr_cpu_test
gpr_env_test: $(BINDIR)/$(CONFIG)/gpr_env_test
@@ -1350,6 +1350,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/alloc_test \
$(BINDIR)/$(CONFIG)/alpn_test \
$(BINDIR)/$(CONFIG)/arena_test \
+ $(BINDIR)/$(CONFIG)/backoff_test \
$(BINDIR)/$(CONFIG)/bad_server_response_test \
$(BINDIR)/$(CONFIG)/bdp_estimator_test \
$(BINDIR)/$(CONFIG)/bin_decoder_test \
@@ -1383,7 +1384,6 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/fling_test \
$(BINDIR)/$(CONFIG)/goaway_server_test \
$(BINDIR)/$(CONFIG)/gpr_avl_test \
- $(BINDIR)/$(CONFIG)/gpr_backoff_test \
$(BINDIR)/$(CONFIG)/gpr_cmdline_test \
$(BINDIR)/$(CONFIG)/gpr_cpu_test \
$(BINDIR)/$(CONFIG)/gpr_env_test \
@@ -1761,6 +1761,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/alpn_test || ( echo test alpn_test failed ; exit 1 )
$(E) "[RUN] Testing arena_test"
$(Q) $(BINDIR)/$(CONFIG)/arena_test || ( echo test arena_test failed ; exit 1 )
+ $(E) "[RUN] Testing backoff_test"
+ $(Q) $(BINDIR)/$(CONFIG)/backoff_test || ( echo test backoff_test failed ; exit 1 )
$(E) "[RUN] Testing bad_server_response_test"
$(Q) $(BINDIR)/$(CONFIG)/bad_server_response_test || ( echo test bad_server_response_test failed ; exit 1 )
$(E) "[RUN] Testing bdp_estimator_test"
@@ -1823,8 +1825,6 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/goaway_server_test || ( echo test goaway_server_test failed ; exit 1 )
$(E) "[RUN] Testing gpr_avl_test"
$(Q) $(BINDIR)/$(CONFIG)/gpr_avl_test || ( echo test gpr_avl_test failed ; exit 1 )
- $(E) "[RUN] Testing gpr_backoff_test"
- $(Q) $(BINDIR)/$(CONFIG)/gpr_backoff_test || ( echo test gpr_backoff_test failed ; exit 1 )
$(E) "[RUN] Testing gpr_cmdline_test"
$(Q) $(BINDIR)/$(CONFIG)/gpr_cmdline_test || ( echo test gpr_cmdline_test failed ; exit 1 )
$(E) "[RUN] Testing gpr_cpu_test"
@@ -2800,7 +2800,6 @@ LIBGPR_SRC = \
src/core/lib/support/arena.cc \
src/core/lib/support/atm.cc \
src/core/lib/support/avl.cc \
- src/core/lib/support/backoff.cc \
src/core/lib/support/cmdline.cc \
src/core/lib/support/cpu_iphone.cc \
src/core/lib/support/cpu_linux.cc \
@@ -2946,6 +2945,7 @@ endif
LIBGRPC_SRC = \
src/core/lib/surface/init.cc \
+ src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@@ -3297,6 +3297,7 @@ endif
LIBGRPC_CRONET_SRC = \
src/core/lib/surface/init.cc \
+ src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@@ -3615,6 +3616,7 @@ LIBGRPC_TEST_UTIL_SRC = \
test/core/util/port_server_client.c \
test/core/util/slice_splitter.c \
test/core/util/trickle_endpoint.c \
+ src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@@ -3869,6 +3871,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
test/core/util/port_server_client.c \
test/core/util/slice_splitter.c \
test/core/util/trickle_endpoint.c \
+ src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@@ -4096,6 +4099,7 @@ endif
LIBGRPC_UNSECURE_SRC = \
src/core/lib/surface/init.cc \
src/core/lib/surface/init_unsecure.cc \
+ src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@@ -4836,6 +4840,7 @@ LIBGRPC++_CRONET_SRC = \
src/core/ext/transport/chttp2/transport/stream_map.cc \
src/core/ext/transport/chttp2/transport/varint.cc \
src/core/ext/transport/chttp2/transport/writing.cc \
+ src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@@ -8892,6 +8897,38 @@ endif
endif
+BACKOFF_TEST_SRC = \
+ test/core/backoff/backoff_test.c \
+
+BACKOFF_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(BACKOFF_TEST_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/backoff_test: openssl_dep_error
+
+else
+
+
+
+$(BINDIR)/$(CONFIG)/backoff_test: $(BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+ $(E) "[LD] Linking $@"
+ $(Q) mkdir -p `dirname $@`
+ $(Q) $(LD) $(LDFLAGS) $(BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/backoff_test
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/core/backoff/backoff_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+
+deps_backoff_test: $(BACKOFF_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(BACKOFF_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
BAD_SERVER_RESPONSE_TEST_SRC = \
test/core/end2end/bad_server_response_test.c \
@@ -10111,38 +10148,6 @@ endif
endif
-GPR_BACKOFF_TEST_SRC = \
- test/core/support/backoff_test.c \
-
-GPR_BACKOFF_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GPR_BACKOFF_TEST_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
-
-$(BINDIR)/$(CONFIG)/gpr_backoff_test: openssl_dep_error
-
-else
-
-
-
-$(BINDIR)/$(CONFIG)/gpr_backoff_test: $(GPR_BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
- $(E) "[LD] Linking $@"
- $(Q) mkdir -p `dirname $@`
- $(Q) $(LD) $(LDFLAGS) $(GPR_BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gpr_backoff_test
-
-endif
-
-$(OBJDIR)/$(CONFIG)/test/core/support/backoff_test.o: $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
-
-deps_gpr_backoff_test: $(GPR_BACKOFF_TEST_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(GPR_BACKOFF_TEST_OBJS:.o=.dep)
-endif
-endif
-
-
GPR_CMDLINE_TEST_SRC = \
test/core/support/cmdline_test.c \
diff --git a/binding.gyp b/binding.gyp
index 1fd430154b..91919c3330 100644
--- a/binding.gyp
+++ b/binding.gyp
@@ -600,7 +600,6 @@
'src/core/lib/support/arena.cc',
'src/core/lib/support/atm.cc',
'src/core/lib/support/avl.cc',
- 'src/core/lib/support/backoff.cc',
'src/core/lib/support/cmdline.cc',
'src/core/lib/support/cpu_iphone.cc',
'src/core/lib/support/cpu_linux.cc',
@@ -658,6 +657,7 @@
],
'sources': [
'src/core/lib/surface/init.cc',
+ 'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
diff --git a/build.yaml b/build.yaml
index 7033d52542..d23716af2a 100644
--- a/build.yaml
+++ b/build.yaml
@@ -66,7 +66,6 @@ filegroups:
- src/core/lib/support/arena.cc
- src/core/lib/support/atm.cc
- src/core/lib/support/avl.cc
- - src/core/lib/support/backoff.cc
- src/core/lib/support/cmdline.cc
- src/core/lib/support/cpu_iphone.cc
- src/core/lib/support/cpu_linux.cc
@@ -143,8 +142,6 @@ filegroups:
- src/core/lib/support/atomic.h
- src/core/lib/support/atomic_with_atm.h
- src/core/lib/support/atomic_with_std.h
- - src/core/lib/support/backoff.h
- - src/core/lib/support/block_annotate.h
- src/core/lib/support/env.h
- src/core/lib/support/memory.h
- src/core/lib/support/mpscq.h
@@ -185,6 +182,7 @@ filegroups:
- grpc++_codegen_base
- name: grpc_base
src:
+ - src/core/lib/backoff/backoff.cc
- src/core/lib/channel/channel_args.cc
- src/core/lib/channel/channel_stack.cc
- src/core/lib/channel/channel_stack_builder.cc
@@ -337,6 +335,7 @@ filegroups:
- include/grpc/status.h
- include/grpc/support/workaround_list.h
headers:
+ - src/core/lib/backoff/backoff.h
- src/core/lib/channel/channel_args.h
- src/core/lib/channel/channel_stack.h
- src/core/lib/channel/channel_stack_builder.h
@@ -355,6 +354,7 @@ filegroups:
- src/core/lib/http/format_request.h
- src/core/lib/http/httpcli.h
- src/core/lib/http/parser.h
+ - src/core/lib/iomgr/block_annotate.h
- src/core/lib/iomgr/call_combiner.h
- src/core/lib/iomgr/closure.h
- src/core/lib/iomgr/combiner.h
@@ -1775,6 +1775,16 @@ targets:
deps:
- gpr_test_util
- gpr
+- name: backoff_test
+ build: test
+ language: c
+ src:
+ - test/core/backoff/backoff_test.c
+ deps:
+ - grpc_test_util
+ - grpc
+ - gpr_test_util
+ - gpr
- name: bad_server_response_test
build: test
language: c
@@ -2203,14 +2213,6 @@ targets:
deps:
- gpr_test_util
- gpr
-- name: gpr_backoff_test
- build: test
- language: c
- src:
- - test/core/support/backoff_test.c
- deps:
- - gpr_test_util
- - gpr
- name: gpr_cmdline_test
build: test
language: c
diff --git a/config.m4 b/config.m4
index 34d7116c73..5d92a2ae34 100644
--- a/config.m4
+++ b/config.m4
@@ -45,7 +45,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/support/arena.cc \
src/core/lib/support/atm.cc \
src/core/lib/support/avl.cc \
- src/core/lib/support/backoff.cc \
src/core/lib/support/cmdline.cc \
src/core/lib/support/cpu_iphone.cc \
src/core/lib/support/cpu_linux.cc \
@@ -86,6 +85,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/support/tmpfile_windows.cc \
src/core/lib/support/wrap_memcpy.cc \
src/core/lib/surface/init.cc \
+ src/core/lib/backoff/backoff.cc \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
@@ -686,6 +686,7 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/transport/chttp2/server/secure)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/transport/chttp2/transport)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/transport/inproc)
+ PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/backoff)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/channel)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/compression)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/debug)
diff --git a/config.w32 b/config.w32
index 3535582699..67b5e2f554 100644
--- a/config.w32
+++ b/config.w32
@@ -22,7 +22,6 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\support\\arena.cc " +
"src\\core\\lib\\support\\atm.cc " +
"src\\core\\lib\\support\\avl.cc " +
- "src\\core\\lib\\support\\backoff.cc " +
"src\\core\\lib\\support\\cmdline.cc " +
"src\\core\\lib\\support\\cpu_iphone.cc " +
"src\\core\\lib\\support\\cpu_linux.cc " +
@@ -63,6 +62,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\support\\tmpfile_windows.cc " +
"src\\core\\lib\\support\\wrap_memcpy.cc " +
"src\\core\\lib\\surface\\init.cc " +
+ "src\\core\\lib\\backoff\\backoff.cc " +
"src\\core\\lib\\channel\\channel_args.cc " +
"src\\core\\lib\\channel\\channel_stack.cc " +
"src\\core\\lib\\channel\\channel_stack_builder.cc " +
@@ -699,6 +699,7 @@ if (PHP_GRPC != "no") {
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\transport\\chttp2\\transport");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\transport\\inproc");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib");
+ FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\backoff");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\channel");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\compression");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\debug");
diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec
index ca5301aa10..08ef738732 100644
--- a/gRPC-Core.podspec
+++ b/gRPC-Core.podspec
@@ -179,8 +179,6 @@ Pod::Spec.new do |s|
'src/core/lib/support/atomic.h',
'src/core/lib/support/atomic_with_atm.h',
'src/core/lib/support/atomic_with_std.h',
- 'src/core/lib/support/backoff.h',
- 'src/core/lib/support/block_annotate.h',
'src/core/lib/support/env.h',
'src/core/lib/support/memory.h',
'src/core/lib/support/mpscq.h',
@@ -197,7 +195,6 @@ Pod::Spec.new do |s|
'src/core/lib/support/arena.cc',
'src/core/lib/support/atm.cc',
'src/core/lib/support/avl.cc',
- 'src/core/lib/support/backoff.cc',
'src/core/lib/support/cmdline.cc',
'src/core/lib/support/cpu_iphone.cc',
'src/core/lib/support/cpu_linux.cc',
@@ -309,6 +306,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/deadline/deadline_filter.h',
'src/core/ext/transport/chttp2/client/chttp2_connector.h',
'src/core/ext/transport/inproc/inproc_transport.h',
+ 'src/core/lib/backoff/backoff.h',
'src/core/lib/channel/channel_args.h',
'src/core/lib/channel/channel_stack.h',
'src/core/lib/channel/channel_stack_builder.h',
@@ -327,6 +325,7 @@ Pod::Spec.new do |s|
'src/core/lib/http/format_request.h',
'src/core/lib/http/httpcli.h',
'src/core/lib/http/parser.h',
+ 'src/core/lib/iomgr/block_annotate.h',
'src/core/lib/iomgr/call_combiner.h',
'src/core/lib/iomgr/closure.h',
'src/core/lib/iomgr/combiner.h',
@@ -461,6 +460,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h',
'src/core/ext/filters/workarounds/workaround_utils.h',
'src/core/lib/surface/init.cc',
+ 'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
@@ -724,8 +724,6 @@ Pod::Spec.new do |s|
'src/core/lib/support/atomic.h',
'src/core/lib/support/atomic_with_atm.h',
'src/core/lib/support/atomic_with_std.h',
- 'src/core/lib/support/backoff.h',
- 'src/core/lib/support/block_annotate.h',
'src/core/lib/support/env.h',
'src/core/lib/support/memory.h',
'src/core/lib/support/mpscq.h',
@@ -808,6 +806,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/deadline/deadline_filter.h',
'src/core/ext/transport/chttp2/client/chttp2_connector.h',
'src/core/ext/transport/inproc/inproc_transport.h',
+ 'src/core/lib/backoff/backoff.h',
'src/core/lib/channel/channel_args.h',
'src/core/lib/channel/channel_stack.h',
'src/core/lib/channel/channel_stack_builder.h',
@@ -826,6 +825,7 @@ Pod::Spec.new do |s|
'src/core/lib/http/format_request.h',
'src/core/lib/http/httpcli.h',
'src/core/lib/http/parser.h',
+ 'src/core/lib/iomgr/block_annotate.h',
'src/core/lib/iomgr/call_combiner.h',
'src/core/lib/iomgr/closure.h',
'src/core/lib/iomgr/combiner.h',
diff --git a/grpc.gemspec b/grpc.gemspec
index c37859f3d1..ce23e6f7df 100644
--- a/grpc.gemspec
+++ b/grpc.gemspec
@@ -88,8 +88,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/support/atomic.h )
s.files += %w( src/core/lib/support/atomic_with_atm.h )
s.files += %w( src/core/lib/support/atomic_with_std.h )
- s.files += %w( src/core/lib/support/backoff.h )
- s.files += %w( src/core/lib/support/block_annotate.h )
s.files += %w( src/core/lib/support/env.h )
s.files += %w( src/core/lib/support/memory.h )
s.files += %w( src/core/lib/support/mpscq.h )
@@ -106,7 +104,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/support/arena.cc )
s.files += %w( src/core/lib/support/atm.cc )
s.files += %w( src/core/lib/support/avl.cc )
- s.files += %w( src/core/lib/support/backoff.cc )
s.files += %w( src/core/lib/support/cmdline.cc )
s.files += %w( src/core/lib/support/cpu_iphone.cc )
s.files += %w( src/core/lib/support/cpu_linux.cc )
@@ -252,6 +249,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/deadline/deadline_filter.h )
s.files += %w( src/core/ext/transport/chttp2/client/chttp2_connector.h )
s.files += %w( src/core/ext/transport/inproc/inproc_transport.h )
+ s.files += %w( src/core/lib/backoff/backoff.h )
s.files += %w( src/core/lib/channel/channel_args.h )
s.files += %w( src/core/lib/channel/channel_stack.h )
s.files += %w( src/core/lib/channel/channel_stack_builder.h )
@@ -270,6 +268,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/http/format_request.h )
s.files += %w( src/core/lib/http/httpcli.h )
s.files += %w( src/core/lib/http/parser.h )
+ s.files += %w( src/core/lib/iomgr/block_annotate.h )
s.files += %w( src/core/lib/iomgr/call_combiner.h )
s.files += %w( src/core/lib/iomgr/closure.h )
s.files += %w( src/core/lib/iomgr/combiner.h )
@@ -408,6 +407,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h )
s.files += %w( src/core/ext/filters/workarounds/workaround_utils.h )
s.files += %w( src/core/lib/surface/init.cc )
+ s.files += %w( src/core/lib/backoff/backoff.cc )
s.files += %w( src/core/lib/channel/channel_args.cc )
s.files += %w( src/core/lib/channel/channel_stack.cc )
s.files += %w( src/core/lib/channel/channel_stack_builder.cc )
diff --git a/grpc.gyp b/grpc.gyp
index 5579f0298a..53e388561f 100644
--- a/grpc.gyp
+++ b/grpc.gyp
@@ -164,7 +164,6 @@
'src/core/lib/support/arena.cc',
'src/core/lib/support/atm.cc',
'src/core/lib/support/avl.cc',
- 'src/core/lib/support/backoff.cc',
'src/core/lib/support/cmdline.cc',
'src/core/lib/support/cpu_iphone.cc',
'src/core/lib/support/cpu_linux.cc',
@@ -224,6 +223,7 @@
],
'sources': [
'src/core/lib/surface/init.cc',
+ 'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
@@ -525,6 +525,7 @@
'test/core/util/port_server_client.c',
'test/core/util/slice_splitter.c',
'test/core/util/trickle_endpoint.c',
+ 'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
@@ -731,6 +732,7 @@
'test/core/util/port_server_client.c',
'test/core/util/slice_splitter.c',
'test/core/util/trickle_endpoint.c',
+ 'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
@@ -922,6 +924,7 @@
'sources': [
'src/core/lib/surface/init.cc',
'src/core/lib/surface/init_unsecure.cc',
+ 'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
diff --git a/include/grpc/impl/codegen/atm_gcc_atomic.h b/include/grpc/impl/codegen/atm_gcc_atomic.h
index 1793ec22b8..76ce863914 100644
--- a/include/grpc/impl/codegen/atm_gcc_atomic.h
+++ b/include/grpc/impl/codegen/atm_gcc_atomic.h
@@ -25,6 +25,7 @@
typedef intptr_t gpr_atm;
#define GPR_ATM_MAX INTPTR_MAX
+#define GPR_ATM_MIN INTPTR_MIN
#ifdef GPR_LOW_LEVEL_COUNTERS
extern gpr_atm gpr_counter_atm_cas;
diff --git a/include/grpc/impl/codegen/atm_gcc_sync.h b/include/grpc/impl/codegen/atm_gcc_sync.h
index 27ae0f63d5..a9e4da3a0f 100644
--- a/include/grpc/impl/codegen/atm_gcc_sync.h
+++ b/include/grpc/impl/codegen/atm_gcc_sync.h
@@ -25,6 +25,7 @@
typedef intptr_t gpr_atm;
#define GPR_ATM_MAX INTPTR_MAX
+#define GPR_ATM_MIN INTPTR_MIN
#define GPR_ATM_COMPILE_BARRIER_() __asm__ __volatile__("" : : : "memory")
diff --git a/include/grpc/impl/codegen/atm_windows.h b/include/grpc/impl/codegen/atm_windows.h
index dfcaa4cc37..b868d79aef 100644
--- a/include/grpc/impl/codegen/atm_windows.h
+++ b/include/grpc/impl/codegen/atm_windows.h
@@ -24,6 +24,7 @@
typedef intptr_t gpr_atm;
#define GPR_ATM_MAX INTPTR_MAX
+#define GPR_ATM_MIN INTPTR_MIN
#define gpr_atm_full_barrier MemoryBarrier
diff --git a/package.xml b/package.xml
index 36206890f1..df0142124d 100644
--- a/package.xml
+++ b/package.xml
@@ -100,8 +100,6 @@
<file baseinstalldir="/" name="src/core/lib/support/atomic.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/atomic_with_atm.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/atomic_with_std.h" role="src" />
- <file baseinstalldir="/" name="src/core/lib/support/backoff.h" role="src" />
- <file baseinstalldir="/" name="src/core/lib/support/block_annotate.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/env.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/memory.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/mpscq.h" role="src" />
@@ -118,7 +116,6 @@
<file baseinstalldir="/" name="src/core/lib/support/arena.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/atm.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/avl.cc" role="src" />
- <file baseinstalldir="/" name="src/core/lib/support/backoff.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/cmdline.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/cpu_iphone.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/support/cpu_linux.cc" role="src" />
@@ -264,6 +261,7 @@
<file baseinstalldir="/" name="src/core/ext/filters/deadline/deadline_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/client/chttp2_connector.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/inproc/inproc_transport.h" role="src" />
+ <file baseinstalldir="/" name="src/core/lib/backoff/backoff.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_args.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_stack.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_stack_builder.h" role="src" />
@@ -282,6 +280,7 @@
<file baseinstalldir="/" name="src/core/lib/http/format_request.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/httpcli.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/http/parser.h" role="src" />
+ <file baseinstalldir="/" name="src/core/lib/iomgr/block_annotate.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/call_combiner.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/closure.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/combiner.h" role="src" />
@@ -420,6 +419,7 @@
<file baseinstalldir="/" name="src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/workarounds/workaround_utils.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/init.cc" role="src" />
+ <file baseinstalldir="/" name="src/core/lib/backoff/backoff.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_args.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_stack.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_stack_builder.cc" role="src" />
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.cc b/src/core/ext/filters/client_channel/channel_connectivity.cc
index a05a11dad1..31a8fc39ce 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.cc
+++ b/src/core/ext/filters/client_channel/channel_connectivity.cc
@@ -188,8 +188,8 @@ static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
watcher_timer_init_arg *wa = (watcher_timer_init_arg *)arg;
grpc_timer_init(exec_ctx, &wa->w->alarm,
- gpr_convert_clock_type(wa->deadline, GPR_CLOCK_MONOTONIC),
- &wa->w->on_timeout, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timespec_to_millis_round_up(wa->deadline),
+ &wa->w->on_timeout);
gpr_free(wa);
}
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index 8bff7548ac..22c2bc8880 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -71,7 +71,7 @@ typedef enum {
typedef struct {
gpr_refcount refs;
- gpr_timespec timeout;
+ grpc_millis timeout;
wait_for_ready_value wait_for_ready;
} method_parameters;
@@ -101,17 +101,18 @@ static bool parse_wait_for_ready(grpc_json *field,
return true;
}
-static bool parse_timeout(grpc_json *field, gpr_timespec *timeout) {
+static bool parse_timeout(grpc_json *field, grpc_millis *timeout) {
if (field->type != GRPC_JSON_STRING) return false;
size_t len = strlen(field->value);
if (field->value[len - 1] != 's') return false;
char *buf = gpr_strdup(field->value);
buf[len - 1] = '\0'; // Remove trailing 's'.
char *decimal_point = strchr(buf, '.');
+ int nanos = 0;
if (decimal_point != NULL) {
*decimal_point = '\0';
- timeout->tv_nsec = gpr_parse_nonnegative_int(decimal_point + 1);
- if (timeout->tv_nsec == -1) {
+ nanos = gpr_parse_nonnegative_int(decimal_point + 1);
+ if (nanos == -1) {
gpr_free(buf);
return false;
}
@@ -130,24 +131,25 @@ static bool parse_timeout(grpc_json *field, gpr_timespec *timeout) {
gpr_free(buf);
return false;
}
- timeout->tv_nsec *= multiplier;
+ nanos *= multiplier;
}
- timeout->tv_sec = gpr_parse_nonnegative_int(buf);
+ int seconds = gpr_parse_nonnegative_int(buf);
gpr_free(buf);
- if (timeout->tv_sec == -1) return false;
+ if (seconds == -1) return false;
+ *timeout = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
return true;
}
static void *method_parameters_create_from_json(const grpc_json *json) {
wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
- gpr_timespec timeout = {0, 0, GPR_TIMESPAN};
+ grpc_millis timeout = 0;
for (grpc_json *field = json->child; field != NULL; field = field->next) {
if (field->key == NULL) continue;
if (strcmp(field->key, "waitForReady") == 0) {
if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL; // Duplicate.
if (!parse_wait_for_ready(field, &wait_for_ready)) return NULL;
} else if (strcmp(field->key, "timeout") == 0) {
- if (timeout.tv_sec > 0 || timeout.tv_nsec > 0) return NULL; // Duplicate.
+ if (timeout > 0) return NULL; // Duplicate.
if (!parse_timeout(field, &timeout)) return NULL;
}
}
@@ -826,7 +828,7 @@ typedef struct client_channel_call_data {
grpc_slice path; // Request path.
gpr_timespec call_start_time;
- gpr_timespec deadline;
+ grpc_millis deadline;
gpr_arena *arena;
grpc_call_stack *owning_call;
grpc_call_combiner *call_combiner;
@@ -979,11 +981,11 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
// If the deadline from the service config is shorter than the one
// from the client API, reset the deadline timer.
if (chand->deadline_checking_enabled &&
- gpr_time_cmp(calld->method_params->timeout,
- gpr_time_0(GPR_TIMESPAN)) != 0) {
- const gpr_timespec per_method_deadline =
- gpr_time_add(calld->call_start_time, calld->method_params->timeout);
- if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
+ calld->method_params->timeout != 0) {
+ const grpc_millis per_method_deadline =
+ grpc_timespec_to_millis_round_up(calld->call_start_time) +
+ calld->method_params->timeout;
+ if (per_method_deadline < calld->deadline) {
calld->deadline = per_method_deadline;
grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
}
@@ -1422,7 +1424,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
// Initialize data members.
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
- calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
+ calld->deadline = args->deadline;
calld->arena = args->arena;
calld->owning_call = args->call_stack;
calld->call_combiner = args->call_combiner;
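
A worked example of the new parse_timeout conversion above (assuming, as the
surrounding code does, that the fractional digits are scaled to nanoseconds by
the multiplier): the service-config string "1.5s" parses to seconds = 1 and
nanos = 500000000, so *timeout = 1 * GPR_MS_PER_SEC + 500000000 / GPR_NS_PER_MS
= 1000 + 500 = 1500 grpc_millis.
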
diff --git a/src/core/ext/filters/client_channel/connector.h b/src/core/ext/filters/client_channel/connector.h
index 79ccb0d9bf..b91c93e446 100644
--- a/src/core/ext/filters/client_channel/connector.h
+++ b/src/core/ext/filters/client_channel/connector.h
@@ -38,7 +38,7 @@ typedef struct {
/** set of pollsets interested in this connection */
grpc_pollset_set *interested_parties;
/** deadline for connection */
- gpr_timespec deadline;
+ grpc_millis deadline;
/** channel arguments (to be passed to transport) */
const grpc_channel_args *channel_args;
} grpc_connect_in_args;
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 773ae29e41..53fa0fff04 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -103,6 +103,7 @@
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/combiner.h"
@@ -112,7 +113,6 @@
#include "src/core/lib/slice/slice_hash_table.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
-#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
@@ -397,7 +397,7 @@ typedef struct glb_lb_policy {
grpc_slice lb_call_status_details;
/** LB call retry backoff state */
- gpr_backoff lb_call_backoff_state;
+ grpc_backoff lb_call_backoff_state;
/** LB call retry timer */
grpc_timer lb_call_retry_timer;
@@ -411,7 +411,7 @@ typedef struct glb_lb_policy {
* recreated whenever lb_call is replaced. */
grpc_grpclb_client_stats *client_stats;
/* Interval and timer for next client load report. */
- gpr_timespec client_stats_report_interval;
+ grpc_millis client_stats_report_interval;
grpc_timer client_load_report_timer;
bool client_load_report_timer_pending;
bool last_client_load_report_counters_were_zero;
@@ -1134,21 +1134,19 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
/* start a timer to fall back */
if (glb_policy->lb_fallback_timeout_ms > 0 &&
glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec deadline = gpr_time_add(
- now,
- gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN));
+ grpc_millis deadline =
+ grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
glb_policy->fallback_timer_active = true;
grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
- &glb_policy->lb_on_fallback, now);
+ &glb_policy->lb_on_fallback);
}
glb_policy->started_picking = true;
- gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
+ grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
query_for_backends_locked(exec_ctx, glb_policy);
}
@@ -1274,17 +1272,15 @@ static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
glb_policy->updating_lb_call = false;
} else if (!glb_policy->shutting_down) {
/* if we aren't shutting down, restart the LB client call after some time */
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec next_try =
- gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
+ grpc_millis next_try =
+ grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
(void *)glb_policy);
- gpr_timespec timeout = gpr_time_sub(next_try, now);
- if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
- gpr_log(GPR_DEBUG,
- "... retry_timer_active in %" PRId64 ".%09d seconds.",
- timeout.tv_sec, timeout.tv_nsec);
+ grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
+ if (timeout > 0) {
+ gpr_log(GPR_DEBUG, "... retry_timer_active in %" PRIdPTR "ms.",
+ timeout);
} else {
gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
}
@@ -1295,7 +1291,7 @@ static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
grpc_combiner_scheduler(glb_policy->base.combiner));
glb_policy->retry_timer_active = true;
grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
- &glb_policy->lb_on_call_retry, now);
+ &glb_policy->lb_on_call_retry);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_server_status_received_locked");
@@ -1306,15 +1302,14 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
- const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- const gpr_timespec next_client_load_report_time =
- gpr_time_add(now, glb_policy->client_stats_report_interval);
+ const grpc_millis next_client_load_report_time =
+ grpc_exec_ctx_now(exec_ctx) + glb_policy->client_stats_report_interval;
GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
send_client_load_report_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
next_client_load_report_time,
- &glb_policy->client_load_report_closure, now);
+ &glb_policy->client_load_report_closure);
}
static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -1408,12 +1403,10 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
* glb_policy->base.interested_parties, which is comprised of the polling
* entities from \a client_channel. */
grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
- gpr_timespec deadline =
+ grpc_millis deadline =
glb_policy->lb_call_timeout_ms == 0
- ? gpr_inf_future(GPR_CLOCK_MONOTONIC)
- : gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_millis(glb_policy->lb_call_timeout_ms,
- GPR_TIMESPAN));
+ ? GRPC_MILLIS_INF_FUTURE
+ : grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms;
glb_policy->lb_call = grpc_channel_create_pollset_set_call(
exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
@@ -1444,12 +1437,12 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
lb_on_response_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
- gpr_backoff_init(&glb_policy->lb_call_backoff_state,
- GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
- GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
- GRPC_GRPCLB_RECONNECT_JITTER,
- GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
- GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+ grpc_backoff_init(&glb_policy->lb_call_backoff_state,
+ GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
+ GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_GRPCLB_RECONNECT_JITTER,
+ GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+ GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
glb_policy->seen_initial_response = false;
glb_policy->last_client_load_report_counters_were_zero = false;
@@ -1557,7 +1550,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
if (glb_policy->lb_response_payload != NULL) {
- gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
+ grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
* glb_policy->lb_response_payload, for a serverlist. */
grpc_byte_buffer_reader bbr;
@@ -1571,16 +1564,14 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
(response = grpc_grpclb_initial_response_parse(response_slice)) !=
NULL) {
if (response->has_client_stats_report_interval) {
- glb_policy->client_stats_report_interval =
- gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN),
- grpc_grpclb_duration_to_timespec(
- &response->client_stats_report_interval));
+ glb_policy->client_stats_report_interval = GPR_MAX(
+ GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
+ &response->client_stats_report_interval));
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"received initial LB response message; "
- "client load reporting interval = %" PRId64 ".%09d sec",
- glb_policy->client_stats_report_interval.tv_sec,
- glb_policy->client_stats_report_interval.tv_nsec);
+ "client load reporting interval = %" PRIdPTR " milliseconds",
+ glb_policy->client_stats_report_interval);
}
/* take a weak ref (won't prevent calling of \a glb_shutdown() if the
* strong ref count goes to zero) to be unref'd in
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
index 8ef6dfc6f4..4d5fb2081c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@@ -299,13 +299,10 @@ int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
return 0;
}
-gpr_timespec grpc_grpclb_duration_to_timespec(
- grpc_grpclb_duration *duration_pb) {
- gpr_timespec duration;
- duration.tv_sec = duration_pb->has_seconds ? duration_pb->seconds : 0;
- duration.tv_nsec = duration_pb->has_nanos ? duration_pb->nanos : 0;
- duration.clock_type = GPR_TIMESPAN;
- return duration;
+grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb) {
+ return (grpc_millis)(
+ (duration_pb->has_seconds ? duration_pb->seconds : 0) * GPR_MS_PER_SEC +
+ (duration_pb->has_nanos ? duration_pb->nanos : 0) / GPR_NS_PER_MS);
}
void grpc_grpclb_initial_response_destroy(
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
index c4a98492c9..56b9c096d0 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@@ -81,8 +81,7 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist);
int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
const grpc_grpclb_duration *rhs);
-gpr_timespec grpc_grpclb_duration_to_timespec(
- grpc_grpclb_duration *duration_pb);
+grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb);
/** Destroy \a initial_response */
void grpc_grpclb_initial_response_destroy(
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index 69f5877b00..5f7ab987cb 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -32,13 +32,13 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/gethostname.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/json/json.h"
-#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/service_config.h"
@@ -89,7 +89,7 @@ typedef struct {
bool have_retry_timer;
grpc_timer retry_timer;
/** retry backoff state */
- gpr_backoff backoff_state;
+ grpc_backoff backoff_state;
/** currently resolving addresses */
grpc_lb_addresses *lb_addresses;
@@ -137,7 +137,7 @@ static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
ares_dns_resolver *r = (ares_dns_resolver *)resolver;
if (!r->resolving) {
- gpr_backoff_reset(&r->backoff_state);
+ grpc_backoff_reset(&r->backoff_state);
dns_ares_start_resolving_locked(exec_ctx, r);
}
}
@@ -271,22 +271,20 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
} else {
const char *msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
- gpr_timespec timeout = gpr_time_sub(next_try, now);
+ grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state);
+ grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
grpc_error_string(error));
GPR_ASSERT(!r->have_retry_timer);
r->have_retry_timer = true;
GRPC_RESOLVER_REF(&r->base, "retry-timer");
- if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
- gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec,
- timeout.tv_nsec);
+ if (timeout > 0) {
+ gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
grpc_timer_init(exec_ctx, &r->retry_timer, next_try,
- &r->dns_ares_on_retry_timer_locked, now);
+ &r->dns_ares_on_retry_timer_locked);
}
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
@@ -307,7 +305,7 @@ static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx,
r->next_completion = on_complete;
r->target_result = target_result;
if (r->resolved_version == 0 && !r->resolving) {
- gpr_backoff_reset(&r->backoff_state);
+ grpc_backoff_reset(&r->backoff_state);
dns_ares_start_resolving_locked(exec_ctx, r);
} else {
dns_ares_maybe_finish_next_locked(exec_ctx, r);
@@ -381,11 +379,11 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
args->pollset_set);
}
- gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
- GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
- GRPC_DNS_RECONNECT_JITTER,
- GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
- GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+ grpc_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
+ GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_DNS_RECONNECT_JITTER,
+ GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+ GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
GRPC_CLOSURE_INIT(&r->dns_ares_on_retry_timer_locked,
dns_ares_on_retry_timer_locked, r,
grpc_combiner_scheduler(r->base.combiner));
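
The hunks above move the retry path from gpr_timespec arithmetic to grpc_millis: grpc_backoff_step now returns the absolute time of the next attempt on the exec_ctx clock, and the delay to log is a plain subtraction. A minimal standalone sketch of that pattern, with hypothetical stand-ins (millis, backoff_step_millis) for the real grpc_backoff and exec_ctx types:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t millis; /* stand-in for grpc_millis */

    typedef struct {
      millis current_backoff; /* grows multiplicatively, capped at max */
      millis max_backoff;
      double multiplier;
    } backoff;

    /* hypothetical equivalent of grpc_backoff_step: returns the absolute
       deadline of the next attempt relative to 'now' */
    static millis backoff_step_millis(backoff *b, millis now) {
      millis next = now + b->current_backoff;
      b->current_backoff = (millis)(b->current_backoff * b->multiplier);
      if (b->current_backoff > b->max_backoff) b->current_backoff = b->max_backoff;
      return next;
    }

    int main(void) {
      backoff b = {1000 /* 1s */, 120000 /* 2min */, 1.6};
      millis now = 0; /* pretend exec_ctx clock value */
      millis next_try = backoff_step_millis(&b, now);
      millis timeout = next_try - now;
      if (timeout > 0) {
        printf("retrying in %" PRId64 " milliseconds\n", timeout);
      } else {
        printf("retrying immediately\n");
      }
      return 0;
    }

The same shape repeats in the native DNS resolver and the subchannel below.
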
diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
index 1c093d0ad2..e669b6dfc7 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
@@ -27,11 +27,11 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
@@ -70,7 +70,7 @@ typedef struct {
grpc_timer retry_timer;
grpc_closure on_retry;
/** retry backoff state */
- gpr_backoff backoff_state;
+ grpc_backoff backoff_state;
/** currently resolving addresses */
grpc_resolved_addresses *addresses;
@@ -113,7 +113,7 @@ static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
if (!r->resolving) {
- gpr_backoff_reset(&r->backoff_state);
+ grpc_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(exec_ctx, r);
}
}
@@ -126,7 +126,7 @@ static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
r->next_completion = on_complete;
r->target_result = target_result;
if (r->resolved_version == 0 && !r->resolving) {
- gpr_backoff_reset(&r->backoff_state);
+ grpc_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(exec_ctx, r);
} else {
dns_maybe_finish_next_locked(exec_ctx, r);
@@ -153,6 +153,9 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_channel_args *result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
+ GRPC_ERROR_REF(error);
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS,
+ grpc_slice_from_copied_string(r->name_to_resolve));
if (r->addresses != NULL) {
grpc_lb_addresses *addresses = grpc_lb_addresses_create(
r->addresses->naddrs, NULL /* user_data_vtable */);
@@ -167,23 +170,21 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_resolved_addresses_destroy(r->addresses);
grpc_lb_addresses_destroy(exec_ctx, addresses);
} else {
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
- gpr_timespec timeout = gpr_time_sub(next_try, now);
+ grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state);
+ grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
grpc_error_string(error));
GPR_ASSERT(!r->have_retry_timer);
r->have_retry_timer = true;
GRPC_RESOLVER_REF(&r->base, "retry-timer");
- if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
- gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec,
- timeout.tv_nsec);
+ if (timeout > 0) {
+ gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
GRPC_CLOSURE_INIT(&r->on_retry, dns_on_retry_timer_locked, r,
grpc_combiner_scheduler(r->base.combiner));
- grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry, now);
+ grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry);
}
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
@@ -191,6 +192,7 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
r->resolved_result = result;
r->resolved_version++;
dns_maybe_finish_next_locked(exec_ctx, r);
+ GRPC_ERROR_UNREF(error);
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
@@ -254,11 +256,11 @@ static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
args->pollset_set);
}
- gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
- GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
- GRPC_DNS_RECONNECT_JITTER,
- GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
- GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+ grpc_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
+ GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_DNS_RECONNECT_JITTER,
+ GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+ GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
return &r->base;
}
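
Besides the backoff change, this hunk has the native resolver annotate the incoming error with the target address: it takes its own reference, attaches the string, and drops that reference once the retry bookkeeping is done, since the callback does not own the error it was handed. A toy refcounted-error sketch of that ownership pattern (error_t, error_set_target and friends are hypothetical stand-ins, not the grpc_error API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* hypothetical stand-in for a refcounted grpc_error with one string slot */
    typedef struct {
      int refs;
      char *target; /* stand-in for GRPC_ERROR_STR_TARGET_ADDRESS */
    } error_t;

    static char *copy_string(const char *s) {
      size_t n = strlen(s) + 1;
      char *out = malloc(n);
      memcpy(out, s, n);
      return out;
    }

    static error_t *error_ref(error_t *e) { if (e) e->refs++; return e; }
    static void error_unref(error_t *e) {
      if (e && --e->refs == 0) { free(e->target); free(e); }
    }
    static error_t *error_set_target(error_t *e, const char *name) {
      free(e->target);
      e->target = copy_string(name);
      return e;
    }

    static void on_resolved(error_t *error, const char *name_to_resolve) {
      /* the callback borrows 'error'; take a ref before annotating it */
      error = error_set_target(error_ref(error), name_to_resolve);
      printf("dns resolution failed for %s\n", error->target);
      /* drop the ref taken above once we are done with the annotated error */
      error_unref(error);
    }

    int main(void) {
      error_t *err = calloc(1, sizeof(*err));
      err->refs = 1;
      on_resolved(err, "dns:example.invalid");
      error_unref(err); /* caller's original reference */
      return 0;
    }
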
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index bff5001d69..5710a22178 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -31,6 +31,7 @@
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include "src/core/ext/filters/client_channel/uri_parser.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/debug/stats.h"
@@ -38,7 +39,6 @@
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
-#include "src/core/lib/support/backoff.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/connectivity_state.h"
@@ -118,9 +118,9 @@ struct grpc_subchannel {
external_state_watcher root_external_state_watcher;
/** next connect attempt time */
- gpr_timespec next_attempt;
+ grpc_millis next_attempt;
/** backoff state */
- gpr_backoff backoff_state;
+ grpc_backoff backoff_state;
/** do we have an active alarm? */
bool have_alarm;
/** have we started the backoff loop */
@@ -364,7 +364,7 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
}
}
}
- gpr_backoff_init(
+ grpc_backoff_init(
&c->backoff_state, initial_backoff_ms,
fixed_reconnect_backoff ? 1.0
: GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER,
@@ -428,8 +428,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
if (error == GRPC_ERROR_NONE) {
gpr_log(GPR_INFO, "Failed to connect to channel, retrying");
- c->next_attempt =
- gpr_backoff_step(&c->backoff_state, gpr_now(GPR_CLOCK_MONOTONIC));
+ c->next_attempt = grpc_backoff_step(exec_ctx, &c->backoff_state);
continue_connect_locked(exec_ctx, c);
gpr_mu_unlock(&c->mu);
} else {
@@ -464,24 +463,22 @@ static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx,
c->connecting = true;
GRPC_SUBCHANNEL_WEAK_REF(c, "connecting");
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!c->backoff_begun) {
c->backoff_begun = true;
- c->next_attempt = gpr_backoff_begin(&c->backoff_state, now);
+ c->next_attempt = grpc_backoff_begin(exec_ctx, &c->backoff_state);
continue_connect_locked(exec_ctx, c);
} else {
GPR_ASSERT(!c->have_alarm);
c->have_alarm = true;
- gpr_timespec time_til_next = gpr_time_sub(c->next_attempt, now);
- if (gpr_time_cmp(time_til_next, gpr_time_0(time_til_next.clock_type)) <=
- 0) {
+ const grpc_millis time_til_next =
+ c->next_attempt - grpc_exec_ctx_now(exec_ctx);
+ if (time_til_next <= 0) {
gpr_log(GPR_INFO, "Retry immediately");
} else {
- gpr_log(GPR_INFO, "Retry in %" PRId64 ".%09d seconds",
- time_til_next.tv_sec, time_til_next.tv_nsec);
+ gpr_log(GPR_INFO, "Retry in %" PRIdPTR " milliseconds", time_til_next);
}
GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm, now);
+ grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm);
}
}
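
The subchannel keeps its two-phase reconnect gate, now expressed in grpc_millis: the first attempt goes out immediately after grpc_backoff_begin, while later attempts compare next_attempt against the exec_ctx clock and arm a timer (note that grpc_timer_init no longer takes a separate 'now' argument). A simplified sketch of the gating logic, with the timer arming reduced to a comment:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t millis;

    /* simplified reconnect gate mirroring maybe_start_connecting_locked:
       first attempt starts immediately, later attempts wait for next_attempt */
    typedef struct {
      bool backoff_begun;
      millis next_attempt;
    } reconnect_state;

    static void maybe_start_connecting(reconnect_state *c, millis now) {
      if (!c->backoff_begun) {
        c->backoff_begun = true;
        c->next_attempt = now; /* hypothetical grpc_backoff_begin result */
        printf("connecting now\n");
      } else {
        millis time_til_next = c->next_attempt - now;
        if (time_til_next <= 0) {
          printf("Retry immediately\n");
        } else {
          printf("Retry in %lld milliseconds\n", (long long)time_til_next);
        }
        /* a real implementation would arm a timer for c->next_attempt here */
      }
    }

    int main(void) {
      reconnect_state c = {false, 0};
      maybe_start_connecting(&c, 0);    /* first attempt: connect immediately */
      c.next_attempt = 1500;            /* pretend backoff chose +1.5s */
      maybe_start_connecting(&c, 1000); /* 500ms early: "Retry in 500 milliseconds" */
      return 0;
    }
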
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index c67ff97b0b..46b29f1fe0 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -107,7 +107,7 @@ typedef struct {
grpc_polling_entity *pollent;
grpc_slice path;
gpr_timespec start_time;
- gpr_timespec deadline;
+ grpc_millis deadline;
gpr_arena *arena;
grpc_call_context_element *context;
grpc_call_combiner *call_combiner;
diff --git a/src/core/ext/filters/deadline/deadline_filter.cc b/src/core/ext/filters/deadline/deadline_filter.cc
index 866ce46acf..dc194ec068 100644
--- a/src/core/ext/filters/deadline/deadline_filter.cc
+++ b/src/core/ext/filters/deadline/deadline_filter.cc
@@ -86,9 +86,8 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
// synchronized.
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
- gpr_timespec deadline) {
- deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
- if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) {
+ grpc_millis deadline) {
+ if (deadline == GRPC_MILLIS_INF_FUTURE) {
return;
}
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
@@ -114,8 +113,7 @@ static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
}
GPR_ASSERT(closure != NULL);
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
- grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
- gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure);
}
// Cancels the deadline timer.
@@ -155,7 +153,7 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
struct start_timer_after_init_state {
bool in_call_combiner;
grpc_call_element* elem;
- gpr_timespec deadline;
+ grpc_millis deadline;
grpc_closure closure;
};
static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
@@ -182,14 +180,13 @@ static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack,
grpc_call_combiner* call_combiner,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
deadline_state->call_stack = call_stack;
deadline_state->call_combiner = call_combiner;
// Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline.
- deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
- if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
+ if (deadline != GRPC_MILLIS_INF_FUTURE) {
// When the deadline passes, we indicate the failure by sending down
// an op with cancel_error set. However, we can't send down any ops
// until after the call stack is fully initialized. If we start the
@@ -214,7 +211,7 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
}
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- gpr_timespec new_deadline) {
+ grpc_millis new_deadline) {
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
cancel_timer_if_needed(exec_ctx, deadline_state);
start_timer_if_needed(exec_ctx, elem, new_deadline);
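
With deadlines carried as grpc_millis, "no deadline" is the single sentinel GRPC_MILLIS_INF_FUTURE, so the clock conversion plus gpr_time_cmp against infinity collapses into one equality test. A tiny sketch of that check; MILLIS_INF_FUTURE here is a stand-in defined as INT64_MAX, which is an assumption about the sentinel's value:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t millis;
    #define MILLIS_INF_FUTURE INT64_MAX /* stand-in for GRPC_MILLIS_INF_FUTURE */

    /* mirrors start_timer_if_needed: only finite deadlines arm the timer */
    static bool should_start_deadline_timer(millis deadline) {
      return deadline != MILLIS_INF_FUTURE;
    }

    int main(void) {
      printf("server-side (infinite deadline): %d\n",
             should_start_deadline_timer(MILLIS_INF_FUTURE)); /* 0 */
      printf("client deadline at t=5000ms: %d\n",
             should_start_deadline_timer(5000)); /* 1 */
      return 0;
    }
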
diff --git a/src/core/ext/filters/deadline/deadline_filter.h b/src/core/ext/filters/deadline/deadline_filter.h
index f4a1110ee6..4a80535b14 100644
--- a/src/core/ext/filters/deadline/deadline_filter.h
+++ b/src/core/ext/filters/deadline/deadline_filter.h
@@ -56,7 +56,8 @@ typedef struct grpc_deadline_state {
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack,
grpc_call_combiner* call_combiner,
- gpr_timespec deadline);
+ grpc_millis deadline);
+
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem);
@@ -70,7 +71,7 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
//
// Note: Must be called while holding the call combiner.
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- gpr_timespec new_deadline);
+ grpc_millis new_deadline);
// To be called from the client-side filter's start_transport_stream_op_batch()
// method. Ensures that the deadline timer is cancelled when the call
diff --git a/src/core/ext/filters/max_age/max_age_filter.cc b/src/core/ext/filters/max_age/max_age_filter.cc
index f4d5b1427e..ade2e5bc82 100644
--- a/src/core/ext/filters/max_age/max_age_filter.cc
+++ b/src/core/ext/filters/max_age/max_age_filter.cc
@@ -56,11 +56,11 @@ typedef struct channel_data {
max_connection_idle */
grpc_timer max_idle_timer;
/* Allowed max time a channel may have no outstanding rpcs */
- gpr_timespec max_connection_idle;
+ grpc_millis max_connection_idle;
/* Allowed max time a channel may exist */
- gpr_timespec max_connection_age;
+ grpc_millis max_connection_age;
/* Allowed grace period after the channel reaches its max age */
- gpr_timespec max_connection_age_grace;
+ grpc_millis max_connection_age_grace;
/* Closure to run when the channel's idle duration reaches max_connection_idle
and should be closed gracefully */
grpc_closure close_max_idle_channel;
@@ -99,10 +99,9 @@ static void increase_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) {
static void decrease_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) {
if (gpr_atm_full_fetch_add(&chand->call_count, -1) == 1) {
GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_idle_timer");
- grpc_timer_init(
- exec_ctx, &chand->max_idle_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), chand->max_connection_idle),
- &chand->close_max_idle_channel, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &chand->max_idle_timer,
+ grpc_exec_ctx_now(exec_ctx) + chand->max_connection_idle,
+ &chand->close_max_idle_channel);
}
}
@@ -123,10 +122,9 @@ static void start_max_age_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_timer_pending = true;
GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_timer");
- grpc_timer_init(
- exec_ctx, &chand->max_age_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), chand->max_connection_age),
- &chand->close_max_age_channel, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &chand->max_age_timer,
+ grpc_exec_ctx_now(exec_ctx) + chand->max_connection_age,
+ &chand->close_max_age_channel);
gpr_mu_unlock(&chand->max_age_timer_mu);
grpc_transport_op* op = grpc_make_transport_op(NULL);
op->on_connectivity_state_change = &chand->channel_connectivity_changed,
@@ -144,11 +142,12 @@ static void start_max_age_grace_timer_after_goaway_op(grpc_exec_ctx* exec_ctx,
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_grace_timer_pending = true;
GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_grace_timer");
- grpc_timer_init(exec_ctx, &chand->max_age_grace_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- chand->max_connection_age_grace),
- &chand->force_close_max_age_channel,
- gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(
+ exec_ctx, &chand->max_age_grace_timer,
+ chand->max_connection_age_grace == GRPC_MILLIS_INF_FUTURE
+ ? GRPC_MILLIS_INF_FUTURE
+ : grpc_exec_ctx_now(exec_ctx) + chand->max_connection_age_grace,
+ &chand->force_close_max_age_channel);
gpr_mu_unlock(&chand->max_age_timer_mu);
GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
"max_age start_max_age_grace_timer_after_goaway_op");
@@ -249,7 +248,8 @@ static void channel_connectivity_changed(grpc_exec_ctx* exec_ctx, void* arg,
connection storms. Note that the MAX_CONNECTION_AGE option without jitter
would not create connection storms by itself, but if there happened to be a
connection storm it could cause it to repeat at a fixed period. */
-static int add_random_max_connection_age_jitter(int value) {
+static grpc_millis
+add_random_max_connection_age_jitter_and_convert_to_grpc_millis(int value) {
/* generate a random number between 1 - MAX_CONNECTION_AGE_JITTER and
1 + MAX_CONNECTION_AGE_JITTER */
double multiplier = rand() * MAX_CONNECTION_AGE_JITTER * 2.0 / RAND_MAX +
@@ -257,7 +257,9 @@ static int add_random_max_connection_age_jitter(int value) {
double result = multiplier * value;
/* INT_MAX - 0.5 converts the value to float, so that result will not be
cast to int implicitly before the comparison. */
- return result > INT_MAX - 0.5 ? INT_MAX : (int)result;
+ return result > ((double)GRPC_MILLIS_INF_FUTURE) - 0.5
+ ? GRPC_MILLIS_INF_FUTURE
+ : (grpc_millis)result;
}
/* Constructor for call_data. */
@@ -287,45 +289,36 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
chand->max_age_grace_timer_pending = false;
chand->channel_stack = args->channel_stack;
chand->max_connection_age =
- DEFAULT_MAX_CONNECTION_AGE_MS == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(add_random_max_connection_age_jitter(
- DEFAULT_MAX_CONNECTION_AGE_MS),
- GPR_TIMESPAN);
+ add_random_max_connection_age_jitter_and_convert_to_grpc_millis(
+ DEFAULT_MAX_CONNECTION_AGE_MS);
chand->max_connection_age_grace =
DEFAULT_MAX_CONNECTION_AGE_GRACE_MS == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(DEFAULT_MAX_CONNECTION_AGE_GRACE_MS,
- GPR_TIMESPAN);
- chand->max_connection_idle =
- DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(DEFAULT_MAX_CONNECTION_IDLE_MS, GPR_TIMESPAN);
+ ? GRPC_MILLIS_INF_FUTURE
+ : DEFAULT_MAX_CONNECTION_AGE_GRACE_MS;
+ chand->max_connection_idle = DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX
+ ? GRPC_MILLIS_INF_FUTURE
+ : DEFAULT_MAX_CONNECTION_IDLE_MS;
for (size_t i = 0; i < args->channel_args->num_args; ++i) {
if (0 == strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_CONNECTION_AGE_MS)) {
const int value = grpc_channel_arg_get_integer(
&args->channel_args->args[i], MAX_CONNECTION_AGE_INTEGER_OPTIONS);
chand->max_connection_age =
- value == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(
- add_random_max_connection_age_jitter(value), GPR_TIMESPAN);
+ add_random_max_connection_age_jitter_and_convert_to_grpc_millis(
+ value);
} else if (0 == strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_CONNECTION_AGE_GRACE_MS)) {
const int value = grpc_channel_arg_get_integer(
&args->channel_args->args[i],
{DEFAULT_MAX_CONNECTION_AGE_GRACE_MS, 0, INT_MAX});
chand->max_connection_age_grace =
- value == INT_MAX ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(value, GPR_TIMESPAN);
+ value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_CONNECTION_IDLE_MS)) {
const int value = grpc_channel_arg_get_integer(
&args->channel_args->args[i], MAX_CONNECTION_IDLE_INTEGER_OPTIONS);
chand->max_connection_idle =
- value == INT_MAX ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(value, GPR_TIMESPAN);
+ value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
}
}
GRPC_CLOSURE_INIT(&chand->close_max_idle_channel, close_max_idle_channel,
@@ -348,8 +341,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
channel_connectivity_changed, chand,
grpc_schedule_on_exec_ctx);
- if (gpr_time_cmp(chand->max_connection_age, gpr_inf_future(GPR_TIMESPAN)) !=
- 0) {
+ if (chand->max_connection_age != GRPC_MILLIS_INF_FUTURE) {
/* When the channel reaches its max age, we send down an op with
goaway_error set. However, we can't send down any ops until after the
channel stack is fully initialized. If we start the timer here, we have
@@ -366,8 +358,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
/* Initialize the number of calls as 1, so that the max_idle_timer will not
start until start_max_idle_timer_after_init is invoked. */
gpr_atm_rel_store(&chand->call_count, 1);
- if (gpr_time_cmp(chand->max_connection_idle, gpr_inf_future(GPR_TIMESPAN)) !=
- 0) {
+ if (chand->max_connection_idle != GRPC_MILLIS_INF_FUTURE) {
GRPC_CHANNEL_STACK_REF(chand->channel_stack,
"max_age start_max_idle_timer_after_init");
GRPC_CLOSURE_SCHED(exec_ctx, &chand->start_max_idle_timer_after_init,
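
The jitter helper in this file now returns grpc_millis directly and clamps against GRPC_MILLIS_INF_FUTURE rather than INT_MAX. A standalone sketch of the arithmetic, assuming the filter's jitter constant is 0.1 (i.e. the multiplier is drawn uniformly from [0.9, 1.1]):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef int64_t millis;
    #define MILLIS_INF_FUTURE INT64_MAX
    #define MAX_CONNECTION_AGE_JITTER 0.1 /* assumed value of the filter's constant */

    /* multiplies 'value' by a random factor in [1 - JITTER, 1 + JITTER] and
       clamps the result into the millis range */
    static millis add_jitter_and_convert(int value) {
      double multiplier = rand() * MAX_CONNECTION_AGE_JITTER * 2.0 / RAND_MAX +
                          1.0 - MAX_CONNECTION_AGE_JITTER;
      double result = multiplier * value;
      /* the - 0.5 keeps the comparison in floating point, avoiding an
         implicit narrowing of 'result' before the test */
      return result > ((double)MILLIS_INF_FUTURE) - 0.5 ? MILLIS_INF_FUTURE
                                                        : (millis)result;
    }

    int main(void) {
      srand(42);
      /* e.g. a 30-minute max connection age lands in [27min, 33min] */
      printf("jittered ms: %lld\n", (long long)add_jitter_and_convert(1800000));
      /* INT_MAX jittered may exceed INT_MAX, but still fits in 64-bit millis */
      printf("jittered INT_MAX: %lld\n", (long long)add_jitter_and_convert(INT_MAX));
      return 0;
    }
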
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.cc b/src/core/ext/transport/chttp2/server/chttp2_server.cc
index a51959bec7..7ac7f4ece8 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.cc
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.cc
@@ -134,8 +134,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
connection_state->handshake_mgr);
// TODO(roth): We should really get this timeout value from channel
// args instead of hard-coding it.
- const gpr_timespec deadline = gpr_time_add(
- gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(120, GPR_TIMESPAN));
+ const grpc_millis deadline =
+ grpc_exec_ctx_now(exec_ctx) + 120 * GPR_MS_PER_SEC;
grpc_handshake_manager_do_handshake(exec_ctx, connection_state->handshake_mgr,
tcp, state->args, deadline, acceptor,
on_handshake_done, connection_state);
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
index 65167c03bb..e4b19a2c4a 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -159,11 +159,9 @@ static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error);
-static void send_ping_locked(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_ping_type ping_type, grpc_closure *on_initiate,
- grpc_closure *on_complete,
- grpc_chttp2_initiate_write_reason initiate_write_reason);
+static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_closure *on_initiate,
+ grpc_closure *on_complete);
static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error);
@@ -279,6 +277,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->is_client = is_client;
t->flow_control.remote_window = DEFAULT_WINDOW;
t->flow_control.announced_window = DEFAULT_WINDOW;
+ t->flow_control.target_initial_window_size = DEFAULT_WINDOW;
t->flow_control.t = t;
t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->is_first_frame = true;
@@ -317,17 +316,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_combiner_scheduler(t->combiner));
grpc_bdp_estimator_init(&t->flow_control.bdp_estimator, t->peer_string);
- t->flow_control.last_pid_update = gpr_now(GPR_CLOCK_MONOTONIC);
- grpc_pid_controller_init(&t->flow_control.pid_controller,
- {
- 4, /* gain_p */
- 8, /* gain_t */
- 0, /* gain_d */
- log2(DEFAULT_WINDOW), /* initial_control_value */
- -1, /* min_control_value */
- 25, /* max_control_value */
- 10 /* integral_range */
- });
grpc_chttp2_goaway_parser_init(&t->goaway_parser);
grpc_chttp2_hpack_parser_init(exec_ctx, &t->hpack_parser);
@@ -366,43 +354,33 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
queue_setting_update(exec_ctx, t,
GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
}
- queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
- DEFAULT_WINDOW);
queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
DEFAULT_MAX_HEADER_LIST_SIZE);
queue_setting_update(exec_ctx, t,
GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
t->ping_policy.max_pings_without_data = g_default_max_pings_without_data;
- t->ping_policy.min_sent_ping_interval_without_data = gpr_time_from_millis(
- g_default_min_sent_ping_interval_without_data_ms, GPR_TIMESPAN);
+ t->ping_policy.min_sent_ping_interval_without_data =
+ g_default_min_sent_ping_interval_without_data_ms;
t->ping_policy.max_ping_strikes = g_default_max_ping_strikes;
- t->ping_policy.min_recv_ping_interval_without_data = gpr_time_from_millis(
- g_default_min_recv_ping_interval_without_data_ms, GPR_TIMESPAN);
+ t->ping_policy.min_recv_ping_interval_without_data =
+ g_default_min_recv_ping_interval_without_data_ms;
/* Keepalive setting */
if (t->is_client) {
- t->keepalive_time =
- g_default_client_keepalive_time_ms == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(g_default_client_keepalive_time_ms,
- GPR_TIMESPAN);
- t->keepalive_timeout =
- g_default_client_keepalive_timeout_ms == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(g_default_client_keepalive_timeout_ms,
- GPR_TIMESPAN);
+ t->keepalive_time = g_default_client_keepalive_time_ms == INT_MAX
+ ? GRPC_MILLIS_INF_FUTURE
+ : g_default_client_keepalive_time_ms;
+ t->keepalive_timeout = g_default_client_keepalive_timeout_ms == INT_MAX
+ ? GRPC_MILLIS_INF_FUTURE
+ : g_default_client_keepalive_timeout_ms;
} else {
- t->keepalive_time =
- g_default_server_keepalive_time_ms == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(g_default_server_keepalive_time_ms,
- GPR_TIMESPAN);
- t->keepalive_timeout =
- g_default_server_keepalive_timeout_ms == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(g_default_server_keepalive_timeout_ms,
- GPR_TIMESPAN);
+ t->keepalive_time = g_default_server_keepalive_time_ms == INT_MAX
+ ? GRPC_MILLIS_INF_FUTURE
+ : g_default_server_keepalive_time_ms;
+ t->keepalive_timeout = g_default_server_keepalive_timeout_ms == INT_MAX
+ ? GRPC_MILLIS_INF_FUTURE
+ : g_default_server_keepalive_timeout_ms;
}
t->keepalive_permit_without_calls = g_default_keepalive_permit_without_calls;
@@ -447,23 +425,21 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
t->ping_policy.min_sent_ping_interval_without_data =
- gpr_time_from_millis(
- grpc_channel_arg_get_integer(
- &channel_args->args[i],
- {g_default_min_sent_ping_interval_without_data_ms, 0,
- INT_MAX}),
- GPR_TIMESPAN);
+ grpc_channel_arg_get_integer(
+ &channel_args->args[i],
+ grpc_integer_options{
+ g_default_min_sent_ping_interval_without_data_ms, 0,
+ INT_MAX});
} else if (0 ==
strcmp(
channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
t->ping_policy.min_recv_ping_interval_without_data =
- gpr_time_from_millis(
- grpc_channel_arg_get_integer(
- &channel_args->args[i],
- {g_default_min_recv_ping_interval_without_data_ms, 0,
- INT_MAX}),
- GPR_TIMESPAN);
+ grpc_channel_arg_get_integer(
+ &channel_args->args[i],
+ grpc_integer_options{
+ g_default_min_recv_ping_interval_without_data_ms, 0,
+ INT_MAX});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE)) {
t->write_buffer_size = (uint32_t)grpc_channel_arg_get_integer(
@@ -476,22 +452,21 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ARG_KEEPALIVE_TIME_MS)) {
const int value = grpc_channel_arg_get_integer(
&channel_args->args[i],
- {t->is_client ? g_default_client_keepalive_time_ms
- : g_default_server_keepalive_time_ms,
- 1, INT_MAX});
- t->keepalive_time = value == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(value, GPR_TIMESPAN);
+ grpc_integer_options{t->is_client
+ ? g_default_client_keepalive_time_ms
+ : g_default_server_keepalive_time_ms,
+ 1, INT_MAX});
+ t->keepalive_time = value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_TIMEOUT_MS)) {
const int value = grpc_channel_arg_get_integer(
&channel_args->args[i],
- {t->is_client ? g_default_client_keepalive_timeout_ms
- : g_default_server_keepalive_timeout_ms,
- 0, INT_MAX});
- t->keepalive_timeout = value == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(value, GPR_TIMESPAN);
+ grpc_integer_options{t->is_client
+ ? g_default_client_keepalive_timeout_ms
+ : g_default_server_keepalive_timeout_ms,
+ 0, INT_MAX});
+ t->keepalive_timeout =
+ value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)) {
t->keepalive_permit_without_calls =
@@ -571,23 +546,27 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->ping_state.pings_before_data_required = 0;
t->ping_state.is_delayed_ping_timer_set = false;
- t->ping_recv_state.last_ping_recv_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
t->ping_recv_state.ping_strikes = 0;
/* Start keepalive pings */
- if (gpr_time_cmp(t->keepalive_time, gpr_inf_future(GPR_TIMESPAN)) != 0) {
+ if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
- grpc_timer_init(
- exec_ctx, &t->keepalive_ping_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
- &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
+ grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+ &t->init_keepalive_ping_locked);
} else {
/* Use GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED to indicate there are no
inflight keepalive timers */
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
}
+ grpc_chttp2_act_on_flowctl_action(
+ exec_ctx,
+ grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control, NULL), t,
+ NULL);
+
grpc_chttp2_initiate_write(exec_ctx, t,
GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE);
post_benign_reclaimer(exec_ctx, t);
@@ -698,7 +677,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1], arena);
grpc_chttp2_data_parser_init(&s->data_parser);
grpc_slice_buffer_init(&s->flow_controlled_buffer);
- s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ s->deadline = GRPC_MILLIS_INF_FUTURE;
GRPC_CLOSURE_INIT(&s->complete_fetch_locked, complete_fetch_locked, s,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer);
@@ -902,9 +881,6 @@ static void inc_initiate_write_reason(
case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx);
break;
- case GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING:
- GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING(exec_ctx);
- break;
case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING(
exec_ctx);
@@ -1042,6 +1018,7 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
write_action, t, scheduler),
GRPC_ERROR_NONE);
} else {
+ GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx);
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
"begin writing nothing");
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
@@ -1140,14 +1117,12 @@ void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_ERROR,
"Received a GOAWAY with error code ENHANCE_YOUR_CALM and debug "
"data equal to \"too_many_pings\"");
- double current_keepalive_time_ms =
- gpr_timespec_to_micros(t->keepalive_time) / 1000;
+ double current_keepalive_time_ms = (double)t->keepalive_time;
t->keepalive_time =
current_keepalive_time_ms > INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis((int64_t)(current_keepalive_time_ms *
- KEEPALIVE_TIME_BACKOFF_MULTIPLIER),
- GPR_TIMESPAN);
+ ? GRPC_MILLIS_INF_FUTURE
+ : (grpc_millis)(current_keepalive_time_ms *
+ KEEPALIVE_TIME_BACKOFF_MULTIPLIER);
}
/* lie: use transient failure from the transport to indicate goaway has been
@@ -1461,8 +1436,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
t->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
if (t->is_client) {
- s->deadline =
- gpr_time_min(s->deadline, s->send_initial_metadata->deadline);
+ s->deadline = GPR_MIN(s->deadline, s->send_initial_metadata->deadline);
}
if (metadata_size > metadata_peer_limit) {
grpc_chttp2_cancel_stream(
@@ -1646,8 +1620,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
&t->flow_control, &s->flow_control, GRPC_HEADER_SIZE_IN_BYTES,
already_received);
grpc_chttp2_act_on_flowctl_action(
- exec_ctx,
- grpc_chttp2_flowctl_get_action(&t->flow_control, &s->flow_control),
+ exec_ctx, grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control,
+ &s->flow_control),
t, s);
}
}
@@ -1680,16 +1654,14 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
if (!t->is_client) {
if (op->send_initial_metadata) {
- gpr_timespec deadline =
+ grpc_millis deadline =
op->payload->send_initial_metadata.send_initial_metadata->deadline;
- GPR_ASSERT(0 ==
- gpr_time_cmp(gpr_inf_future(deadline.clock_type), deadline));
+ GPR_ASSERT(deadline == GRPC_MILLIS_INF_FUTURE);
}
if (op->send_trailing_metadata) {
- gpr_timespec deadline =
+ grpc_millis deadline =
op->payload->send_trailing_metadata.send_trailing_metadata->deadline;
- GPR_ASSERT(0 ==
- gpr_time_cmp(gpr_inf_future(deadline.clock_type), deadline));
+ GPR_ASSERT(deadline == GRPC_MILLIS_INF_FUTURE);
}
}
@@ -1713,28 +1685,21 @@ static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error) {
/* callback remaining pings: they're not allowed to call into the transport,
and maybe they hold resources that need to be freed */
- for (size_t i = 0; i < GRPC_CHTTP2_PING_TYPE_COUNT; i++) {
- grpc_chttp2_ping_queue *pq = &t->ping_queues[i];
- for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) {
- grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error));
- GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[j]);
- }
+ grpc_chttp2_ping_queue *pq = &t->ping_queue;
+ for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) {
+ grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[j]);
}
GRPC_ERROR_UNREF(error);
}
-static void send_ping_locked(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_ping_type ping_type, grpc_closure *on_initiate,
- grpc_closure *on_ack,
- grpc_chttp2_initiate_write_reason initiate_write_reason) {
- grpc_chttp2_ping_queue *pq = &t->ping_queues[ping_type];
+static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_closure *on_initiate, grpc_closure *on_ack) {
+ grpc_chttp2_ping_queue *pq = &t->ping_queue;
grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INITIATE], on_initiate,
GRPC_ERROR_NONE);
- if (grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack,
- GRPC_ERROR_NONE)) {
- grpc_chttp2_initiate_write(exec_ctx, t, initiate_write_reason);
- }
+ grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack,
+ GRPC_ERROR_NONE);
}
static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
@@ -1749,8 +1714,7 @@ static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint64_t id) {
- grpc_chttp2_ping_queue *pq =
- &t->ping_queues[id % GRPC_CHTTP2_PING_TYPE_COUNT];
+ grpc_chttp2_ping_queue *pq = &t->ping_queue;
if (pq->inflight_id != id) {
char *from = grpc_endpoint_get_peer(t->ep);
gpr_log(GPR_DEBUG, "Unknown ping response from %s: %" PRIx64, from, id);
@@ -1769,8 +1733,8 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED;
grpc_http2_error_code http_error;
grpc_slice slice;
- grpc_error_get_status(error, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL,
- &slice, &http_error);
+ grpc_error_get_status(exec_ctx, error, GRPC_MILLIS_INF_FUTURE, NULL, &slice,
+ &http_error);
grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)http_error,
grpc_slice_ref_internal(slice), &t->qbuf);
grpc_chttp2_initiate_write(exec_ctx, t,
@@ -1780,7 +1744,7 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
- gpr_log(GPR_DEBUG, "PING strike");
+ t->ping_recv_state.ping_strikes++;
if (++t->ping_recv_state.ping_strikes > t->ping_policy.max_ping_strikes &&
t->ping_policy.max_ping_strikes != 0) {
send_goaway(exec_ctx, t,
@@ -1820,9 +1784,9 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
}
if (op->send_ping) {
- send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, NULL,
- op->send_ping,
- GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING);
+ send_ping_locked(exec_ctx, t, NULL, op->send_ping);
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING);
}
if (op->on_connectivity_state_change != NULL) {
@@ -2069,7 +2033,8 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
if (!s->read_closed || !s->write_closed) {
if (s->id != 0) {
grpc_http2_error_code http_error;
- grpc_error_get_status(due_to_error, s->deadline, NULL, NULL, &http_error);
+ grpc_error_get_status(exec_ctx, due_to_error, s->deadline, NULL, NULL,
+ &http_error);
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error,
&s->stats.outgoing));
@@ -2087,7 +2052,7 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error) {
grpc_status_code status;
grpc_slice slice;
- grpc_error_get_status(error, s->deadline, &status, &slice, NULL);
+ grpc_error_get_status(exec_ctx, error, s->deadline, &status, &slice, NULL);
if (status != GRPC_STATUS_OK) {
s->seen_error = true;
@@ -2252,7 +2217,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint32_t len = 0;
grpc_status_code grpc_status;
grpc_slice slice;
- grpc_error_get_status(error, s->deadline, &grpc_status, &slice, NULL);
+ grpc_error_get_status(exec_ctx, error, s->deadline, &grpc_status, &slice,
+ NULL);
GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100);
@@ -2469,10 +2435,8 @@ void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
if (action.need_ping) {
GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping");
grpc_bdp_estimator_schedule_ping(&t->flow_control.bdp_estimator);
- send_ping_locked(exec_ctx, t,
- GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE,
- &t->start_bdp_ping_locked, &t->finish_bdp_ping_locked,
- GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING);
+ send_ping_locked(exec_ctx, t, &t->start_bdp_ping_locked,
+ &t->finish_bdp_ping_locked);
}
}
@@ -2580,7 +2544,8 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer,
&t->read_action_locked);
grpc_chttp2_act_on_flowctl_action(
- exec_ctx, grpc_chttp2_flowctl_get_bdp_action(&t->flow_control), t,
+ exec_ctx,
+ grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control, NULL), t,
NULL);
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
} else {
@@ -2613,7 +2578,7 @@ static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Complete BDP ping", t->peer_string);
}
- grpc_bdp_estimator_complete_ping(&t->flow_control.bdp_estimator);
+ grpc_bdp_estimator_complete_ping(exec_ctx, &t->flow_control.bdp_estimator);
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "bdp_ping");
}
@@ -2687,24 +2652,22 @@ static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_chttp2_stream_map_size(&t->stream_map) > 0) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_PINGING;
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive ping end");
- send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE,
- &t->start_keepalive_ping_locked,
- &t->finish_keepalive_ping_locked,
- GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);
+ send_ping_locked(exec_ctx, t, &t->start_keepalive_ping_locked,
+ &t->finish_keepalive_ping_locked);
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);
} else {
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
- grpc_timer_init(
- exec_ctx, &t->keepalive_ping_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
- &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
+ grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+ &t->init_keepalive_ping_locked);
}
} else if (error == GRPC_ERROR_CANCELLED) {
/* The keepalive ping timer may be cancelled by bdp */
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
- grpc_timer_init(
- exec_ctx, &t->keepalive_ping_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
- &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
+ grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+ &t->init_keepalive_ping_locked);
}
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "init keepalive ping");
}
@@ -2713,10 +2676,9 @@ static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
- grpc_timer_init(
- exec_ctx, &t->keepalive_watchdog_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_timeout),
- &t->keepalive_watchdog_fired_locked, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &t->keepalive_watchdog_timer,
+ grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+ &t->keepalive_watchdog_fired_locked);
}
static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -2727,10 +2689,9 @@ static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
grpc_timer_cancel(exec_ctx, &t->keepalive_watchdog_timer);
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
- grpc_timer_init(
- exec_ctx, &t->keepalive_ping_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
- &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
+ grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+ &t->init_keepalive_ping_locked);
}
}
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keepalive ping end");
@@ -2830,9 +2791,9 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
bs->next_action.max_size_hint,
cur_length);
grpc_chttp2_act_on_flowctl_action(
- exec_ctx,
- grpc_chttp2_flowctl_get_action(&t->flow_control, &s->flow_control), t,
- s);
+ exec_ctx, grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control,
+ &s->flow_control),
+ t, s);
}
GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0);
if (s->frame_storage.length > 0) {
@@ -3180,8 +3141,6 @@ const char *grpc_chttp2_initiate_write_reason_string(
return "TRANSPORT_FLOW_CONTROL";
case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS:
return "SEND_SETTINGS";
- case GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING:
- return "BDP_ESTIMATOR_PING";
case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING:
return "FLOW_CONTROL_UNSTALLED_BY_SETTING";
case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE:
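
The transport collapses its per-type ping queues into a single queue, and send_ping_locked no longer decides whether to kick off a write: application and keepalive pings pair it with an explicit grpc_chttp2_initiate_write, while the BDP ping loses its dedicated initiate-write reason entirely. A simplified sketch of that split, with hypothetical closure/ping_queue types and the list ordering simplified:

    #include <stdint.h>
    #include <stdio.h>

    /* a queued callback; stand-in for grpc_closure */
    typedef struct closure {
      void (*cb)(void *arg);
      void *arg;
      struct closure *next;
    } closure;

    typedef enum { PCL_INITIATE = 0, PCL_NEXT, PCL_INFLIGHT, PCL_COUNT } ping_list;

    /* single ping queue, mirroring the move from ping_queues[] to ping_queue */
    typedef struct {
      closure *lists[PCL_COUNT];
      uint64_t inflight_id;
    } ping_queue;

    static void list_append(closure **list, closure *c) {
      c->next = *list;
      *list = c;
    }

    /* queues the callbacks only; the caller decides separately whether to
       initiate a write (send_ping_locked + grpc_chttp2_initiate_write) */
    static void send_ping(ping_queue *pq, closure *on_initiate, closure *on_ack) {
      if (on_initiate != NULL) list_append(&pq->lists[PCL_INITIATE], on_initiate);
      if (on_ack != NULL) list_append(&pq->lists[PCL_NEXT], on_ack);
    }

    static void on_app_ping_ack(void *arg) {
      printf("ping acked: %s\n", (const char *)arg);
    }

    int main(void) {
      ping_queue pq = {{NULL, NULL, NULL}, 0};
      closure ack = {on_app_ping_ack, (void *)"application ping", NULL};
      send_ping(&pq, NULL, &ack);
      /* a writer would later move PCL_NEXT -> PCL_INFLIGHT and, on the ack
         frame, run the inflight list; here we just run it directly */
      for (closure *c = pq.lists[PCL_NEXT]; c != NULL; c = c->next) c->cb(c->arg);
      return 0;
    }
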
diff --git a/src/core/ext/transport/chttp2/transport/flow_control.cc b/src/core/ext/transport/chttp2/transport/flow_control.cc
index 75eae1f962..639e51da70 100644
--- a/src/core/ext/transport/chttp2/transport/flow_control.cc
+++ b/src/core/ext/transport/chttp2/transport/flow_control.cc
@@ -176,11 +176,9 @@ static void trace_action(grpc_chttp2_transport_flowctl* tfc,
/* How many bytes of incoming flow control would we like to advertise */
static uint32_t grpc_chttp2_target_announced_window(
const grpc_chttp2_transport_flowctl* tfc) {
- return (uint32_t)GPR_MIN(
- (int64_t)((1u << 31) - 1),
- tfc->announced_stream_total_over_incoming_window +
- tfc->t->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
+ return (uint32_t)GPR_MIN((int64_t)((1u << 31) - 1),
+ tfc->announced_stream_total_over_incoming_window +
+ tfc->target_initial_window_size);
}
// we have sent data on the wire, we must track this in our bookkeeping for the
@@ -282,13 +280,14 @@ grpc_error* grpc_chttp2_flowctl_recv_data(grpc_chttp2_transport_flowctl* tfc,
// Returns a non zero announce integer if we should send a transport window
// update
uint32_t grpc_chttp2_flowctl_maybe_send_transport_update(
- grpc_chttp2_transport_flowctl* tfc) {
+ grpc_chttp2_transport_flowctl* tfc, bool writing_anyway) {
PRETRACE(tfc, NULL);
uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc);
uint32_t threshold_to_send_transport_window_update =
tfc->t->outbuf.count > 0 ? 3 * target_announced_window / 4
: target_announced_window / 2;
- if (tfc->announced_window <= threshold_to_send_transport_window_update &&
+ if ((writing_anyway ||
+ tfc->announced_window <= threshold_to_send_transport_window_update) &&
tfc->announced_window != target_announced_window) {
uint32_t announce = (uint32_t)GPR_CLAMP(
target_announced_window - tfc->announced_window, 0, UINT32_MAX);
@@ -393,15 +392,26 @@ static grpc_chttp2_flowctl_urgency delta_is_significant(
// Takes in a target and uses the pid controller to return a stabilized
// guess at the new bdp.
-static double get_pid_controller_guess(grpc_chttp2_transport_flowctl* tfc,
+static double get_pid_controller_guess(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport_flowctl* tfc,
double target) {
- double bdp_error = target - grpc_pid_controller_last(&tfc->pid_controller);
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec dt_timespec = gpr_time_sub(now, tfc->last_pid_update);
- double dt = (double)dt_timespec.tv_sec + dt_timespec.tv_nsec * 1e-9;
- if (dt > 0.1) {
- dt = 0.1;
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ if (!tfc->pid_controller_initialized) {
+ tfc->last_pid_update = now;
+ tfc->pid_controller_initialized = true;
+ grpc_pid_controller_init(
+ &tfc->pid_controller,
+ (grpc_pid_controller_args){.gain_p = 4,
+ .gain_i = 8,
+ .gain_d = 0,
+ .initial_control_value = target,
+ .min_control_value = -1,
+ .max_control_value = 25,
+ .integral_range = 10});
+ return pow(2, target);
}
+ double bdp_error = target - grpc_pid_controller_last(&tfc->pid_controller);
+ double dt = (double)(now - tfc->last_pid_update) * 1e-3;
double log2_bdp_guess =
grpc_pid_controller_update(&tfc->pid_controller, bdp_error, dt);
tfc->last_pid_update = now;
@@ -414,20 +424,25 @@ static double get_target_under_memory_pressure(
// do not increase window under heavy memory pressure.
double memory_pressure = grpc_resource_quota_get_memory_pressure(
grpc_resource_user_quota(grpc_endpoint_get_resource_user(tfc->t->ep)));
- if (memory_pressure > 0.8) {
- target *= 1 - GPR_MIN(1, (memory_pressure - 0.8) / 0.1);
+ static const double kLowMemPressure = 0.1;
+ static const double kZeroTarget = 22;
+ static const double kHighMemPressure = 0.8;
+ static const double kMaxMemPressure = 0.9;
+ if (memory_pressure < kLowMemPressure && target < kZeroTarget) {
+ target = (target - kZeroTarget) * memory_pressure / kLowMemPressure +
+ kZeroTarget;
+ } else if (memory_pressure > kHighMemPressure) {
+ target *= 1 - GPR_MIN(1, (memory_pressure - kHighMemPressure) /
+ (kMaxMemPressure - kHighMemPressure));
}
return target;
}
grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action(
- grpc_chttp2_transport_flowctl* tfc, grpc_chttp2_stream_flowctl* sfc) {
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_transport_flowctl* tfc,
+ grpc_chttp2_stream_flowctl* sfc) {
grpc_chttp2_flowctl_action action;
memset(&action, 0, sizeof(action));
- uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc);
- if (tfc->announced_window < target_announced_window / 2) {
- action.send_transport_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY;
- }
// TODO(ncteisen): tune this
if (sfc != NULL && !sfc->s->read_closed) {
uint32_t sent_init_window =
@@ -442,20 +457,12 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action(
action.send_stream_update = GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE;
}
}
- TRACEACTION(tfc, action);
- return action;
-}
-
-grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
- grpc_chttp2_transport_flowctl* tfc) {
- grpc_chttp2_flowctl_action action;
- memset(&action, 0, sizeof(action));
if (tfc->enable_bdp_probe) {
- action.need_ping = grpc_bdp_estimator_need_ping(&tfc->bdp_estimator);
+ action.need_ping =
+ grpc_bdp_estimator_need_ping(exec_ctx, &tfc->bdp_estimator);
// get bdp estimate and update initial_window accordingly.
int64_t estimate = -1;
- int32_t bdp = -1;
if (grpc_bdp_estimator_get_estimate(&tfc->bdp_estimator, &estimate)) {
double target = 1 + log2((double)estimate);
@@ -466,17 +473,18 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
// run our target through the pid controller to stabilize change.
// TODO(ncteisen): experiment with other controllers here.
- double bdp_guess = get_pid_controller_guess(tfc, target);
+ double bdp_guess = get_pid_controller_guess(exec_ctx, tfc, target);
// Though initial window 'could' drop to 0, we keep the floor at 128
- bdp = GPR_MAX((int32_t)bdp_guess, 128);
+ tfc->target_initial_window_size =
+ (int32_t)GPR_CLAMP(bdp_guess, 128, INT32_MAX);
grpc_chttp2_flowctl_urgency init_window_update_urgency =
- delta_is_significant(tfc, bdp,
+ delta_is_significant(tfc, tfc->target_initial_window_size,
GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE);
if (init_window_update_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
action.send_setting_update = init_window_update_urgency;
- action.initial_window_size = (uint32_t)bdp;
+ action.initial_window_size = (uint32_t)tfc->target_initial_window_size;
}
}
@@ -485,8 +493,9 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
if (grpc_bdp_estimator_get_bw(&tfc->bdp_estimator, &bw_dbl)) {
// we target the max of BDP or bandwidth in microseconds.
int32_t frame_size = (int32_t)GPR_CLAMP(
- GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000, bdp), 16384,
- 16777215);
+ GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000,
+ tfc->target_initial_window_size),
+ 16384, 16777215);
grpc_chttp2_flowctl_urgency frame_size_urgency = delta_is_significant(
tfc, frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE);
if (frame_size_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
@@ -497,7 +506,10 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
}
}
}
-
+ uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc);
+ if (tfc->announced_window < target_announced_window / 2) {
+ action.send_transport_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY;
+ }
TRACEACTION(tfc, action);
return action;
}
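
get_target_under_memory_pressure gains a low-pressure branch in addition to the existing high-pressure damping: when pressure is below 0.1 and the log2(BDP) target is below 22, the target is interpolated up toward 22, and above 0.8 it is scaled down, reaching zero at 0.9. The piecewise function below is lifted out of the hunk above with the gRPC-specific types stripped, plus a few sample values:

    #include <stdio.h>

    #define GPR_MIN(a, b) ((a) < (b) ? (a) : (b))

    /* piecewise damping of the log2(BDP) target as memory pressure rises,
       mirroring get_target_under_memory_pressure in flow_control.cc */
    static double target_under_memory_pressure(double target, double memory_pressure) {
      static const double kLowMemPressure = 0.1;
      static const double kZeroTarget = 22;
      static const double kHighMemPressure = 0.8;
      static const double kMaxMemPressure = 0.9;
      if (memory_pressure < kLowMemPressure && target < kZeroTarget) {
        /* interpolate from kZeroTarget (at pressure 0) toward target (at 0.1) */
        target = (target - kZeroTarget) * memory_pressure / kLowMemPressure +
                 kZeroTarget;
      } else if (memory_pressure > kHighMemPressure) {
        /* scale toward zero between pressure 0.8 and 0.9 */
        target *= 1 - GPR_MIN(1, (memory_pressure - kHighMemPressure) /
                                     (kMaxMemPressure - kHighMemPressure));
      }
      return target;
    }

    int main(void) {
      printf("%.2f\n", target_under_memory_pressure(16.0, 0.0));  /* 22.00 */
      printf("%.2f\n", target_under_memory_pressure(16.0, 0.05)); /* 19.00 */
      printf("%.2f\n", target_under_memory_pressure(16.0, 0.5));  /* 16.00 */
      printf("%.2f\n", target_under_memory_pressure(16.0, 0.85)); /*  8.00 */
      printf("%.2f\n", target_under_memory_pressure(16.0, 0.95)); /*  0.00 */
      return 0;
    }
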
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.cc b/src/core/ext/transport/chttp2/transport/frame_ping.cc
index d431d6b2df..1cfa883ee1 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.cc
@@ -89,10 +89,10 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_ack_ping(exec_ctx, t, p->opaque_8bytes);
} else {
if (!t->is_client) {
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec next_allowed_ping =
- gpr_time_add(t->ping_recv_state.last_ping_recv_time,
- t->ping_policy.min_recv_ping_interval_without_data);
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ grpc_millis next_allowed_ping =
+ t->ping_recv_state.last_ping_recv_time +
+ t->ping_policy.min_recv_ping_interval_without_data;
if (t->keepalive_permit_without_calls == 0 &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
@@ -100,11 +100,10 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
no less than two hours. When there are no outstanding streams, we
restrict the number of PINGS equivalent to TCP Keep-Alive. */
next_allowed_ping =
- gpr_time_add(t->ping_recv_state.last_ping_recv_time,
- gpr_time_from_seconds(7200, GPR_TIMESPAN));
+ t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC;
}
- if (gpr_time_cmp(next_allowed_ping, now) > 0) {
+ if (next_allowed_ping > now) {
grpc_chttp2_add_ping_strike(exec_ctx, t);
}
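
Server-side ping policing becomes plain integer arithmetic: the next allowed ping is the last receipt time plus the minimum receive interval, or a two-hour floor when keepalive without calls is disallowed and there are no open streams, and a ping that arrives earlier earns a strike. A standalone sketch of the check (ping_is_a_strike is a hypothetical name for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t millis;
    #define MS_PER_SEC 1000

    /* mirrors the server-side check in grpc_chttp2_ping_parser_parse */
    static bool ping_is_a_strike(millis now, millis last_ping_recv_time,
                                 millis min_recv_ping_interval_without_data,
                                 bool keepalive_permit_without_calls,
                                 size_t open_streams) {
      millis next_allowed_ping =
          last_ping_recv_time + min_recv_ping_interval_without_data;
      if (!keepalive_permit_without_calls && open_streams == 0) {
        /* no streams: fall back to the TCP-keepalive-like two hour floor */
        next_allowed_ping = last_ping_recv_time + 7200 * MS_PER_SEC;
      }
      return next_allowed_ping > now;
    }

    int main(void) {
      /* last ping at t=0, minimum interval 5 minutes, one open stream */
      printf("%d\n", ping_is_a_strike(60 * MS_PER_SEC, 0, 300 * MS_PER_SEC,
                                      false, 1)); /* 1: too soon */
      printf("%d\n", ping_is_a_strike(600 * MS_PER_SEC, 0, 300 * MS_PER_SEC,
                                      false, 1)); /* 0: allowed */
      return 0;
    }
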
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.cc b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
index 5f1a2708a5..17b8c4ab85 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
@@ -535,12 +535,12 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
#define TIMEOUT_KEY "grpc-timeout"
static void deadline_enc(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c, gpr_timespec deadline,
+ grpc_chttp2_hpack_compressor *c, grpc_millis deadline,
framer_state *st) {
char timeout_str[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
grpc_mdelem mdelem;
- grpc_http2_encode_timeout(
- gpr_time_sub(deadline, gpr_now(deadline.clock_type)), timeout_str);
+ grpc_http2_encode_timeout(deadline - grpc_exec_ctx_now(exec_ctx),
+ timeout_str);
mdelem = grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_TIMEOUT,
grpc_slice_from_copied_string(timeout_str));
hpack_enc(exec_ctx, c, mdelem, st);
@@ -660,8 +660,8 @@ void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
for (grpc_linked_mdelem *l = metadata->list.head; l; l = l->next) {
hpack_enc(exec_ctx, c, l->md, &st);
}
- gpr_timespec deadline = metadata->deadline;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) {
+ grpc_millis deadline = metadata->deadline;
+ if (deadline != GRPC_MILLIS_INF_FUTURE) {
deadline_enc(exec_ctx, c, deadline, &st);
}
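
deadline_enc now derives the grpc-timeout header value from deadline minus the exec_ctx clock, in milliseconds. A toy encoder showing the idea of rendering that relative value with a unit suffix; the real grpc_http2_encode_timeout chooses units and digit counts more carefully, so treat this purely as an illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t millis;

    /* toy version of a timeout encoder: render a relative timeout in a unit
       that keeps the rendered number short */
    static void encode_timeout(millis timeout, char *out, size_t n) {
      if (timeout <= 0) {
        snprintf(out, n, "1n"); /* already expired: send the minimum */
      } else if (timeout < 100000) {
        snprintf(out, n, "%" PRId64 "m", timeout); /* milliseconds */
      } else if (timeout < 100000 * 60) {
        snprintf(out, n, "%" PRId64 "S", timeout / 1000); /* seconds */
      } else {
        snprintf(out, n, "%" PRId64 "M", timeout / 60000); /* minutes */
      }
    }

    int main(void) {
      char buf[32];
      millis now = 1000, deadline = 31000; /* pretend exec_ctx clock values */
      encode_timeout(deadline - now, buf, sizeof(buf));
      printf("grpc-timeout: %s\n", buf); /* 30000m */
      return 0;
    }
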
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.cc b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
index ba680a89db..187ce0ea87 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
@@ -29,7 +29,7 @@ void grpc_chttp2_incoming_metadata_buffer_init(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena) {
buffer->arena = arena;
grpc_metadata_batch_init(&buffer->batch);
- buffer->batch.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ buffer->batch.deadline = GRPC_MILLIS_INF_FUTURE;
}
void grpc_chttp2_incoming_metadata_buffer_destroy(
@@ -62,7 +62,7 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
}
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
- grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline) {
+ grpc_chttp2_incoming_metadata_buffer *buffer, grpc_millis deadline) {
buffer->batch.deadline = deadline;
}
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.h b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
index 9ffcabd0b9..995e8001b1 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.h
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
@@ -47,7 +47,7 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
grpc_mdelem elem) GRPC_MUST_USE_RESULT;
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
- grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline);
+ grpc_chttp2_incoming_metadata_buffer *buffer, grpc_millis deadline);
#ifdef __cplusplus
}
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 96af18f1d1..b51854fcf8 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -66,12 +66,6 @@ typedef enum {
} grpc_chttp2_write_state;
typedef enum {
- GRPC_CHTTP2_PING_ON_NEXT_WRITE = 0,
- GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE,
- GRPC_CHTTP2_PING_TYPE_COUNT /* must be last */
-} grpc_chttp2_ping_type;
-
-typedef enum {
GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY,
GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT,
} grpc_chttp2_optimization_target;
@@ -97,7 +91,6 @@ typedef enum {
GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL,
GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL,
GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS,
- GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING,
GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING,
GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE,
GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING,
@@ -118,19 +111,19 @@ typedef struct {
typedef struct {
int max_pings_without_data;
int max_ping_strikes;
- gpr_timespec min_sent_ping_interval_without_data;
- gpr_timespec min_recv_ping_interval_without_data;
+ grpc_millis min_sent_ping_interval_without_data;
+ grpc_millis min_recv_ping_interval_without_data;
} grpc_chttp2_repeated_ping_policy;
typedef struct {
- gpr_timespec last_ping_sent_time;
+ grpc_millis last_ping_sent_time;
int pings_before_data_required;
grpc_timer delayed_ping_timer;
bool is_delayed_ping_timer_set;
} grpc_chttp2_repeated_ping_state;
typedef struct {
- gpr_timespec last_ping_recv_time;
+ grpc_millis last_ping_recv_time;
int ping_strikes;
} grpc_chttp2_server_ping_recv_state;
@@ -269,6 +262,8 @@ typedef struct {
* to send WINDOW_UPDATE frames. */
int64_t announced_window;
+ int32_t target_initial_window_size;
+
/** should we probe bdp? */
bool enable_bdp_probe;
@@ -276,8 +271,9 @@ typedef struct {
grpc_bdp_estimator bdp_estimator;
/* pid controller */
+ bool pid_controller_initialized;
grpc_pid_controller pid_controller;
- gpr_timespec last_pid_update;
+ grpc_millis last_pid_update;
// pointer back to transport for tracing
const grpc_chttp2_transport *t;
@@ -374,7 +370,7 @@ struct grpc_chttp2_transport {
uint32_t last_new_stream_id;
/** ping queues for various ping insertion points */
- grpc_chttp2_ping_queue ping_queues[GRPC_CHTTP2_PING_TYPE_COUNT];
+ grpc_chttp2_ping_queue ping_queue;
grpc_chttp2_repeated_ping_policy ping_policy;
grpc_chttp2_repeated_ping_state ping_state;
uint64_t ping_ctr; /* unique id for pings */
@@ -459,9 +455,9 @@ struct grpc_chttp2_transport {
/** watchdog to kill the transport when waiting for the keepalive ping */
grpc_timer keepalive_watchdog_timer;
/** time duration in between pings */
- gpr_timespec keepalive_time;
+ grpc_millis keepalive_time;
/** grace period for a ping to complete before watchdog kicks in */
- gpr_timespec keepalive_timeout;
+ grpc_millis keepalive_timeout;
/** if keepalive pings are allowed when there's no outstanding streams */
bool keepalive_permit_without_calls;
/** keep-alive state machine state */
@@ -570,7 +566,7 @@ struct grpc_chttp2_stream {
grpc_error *byte_stream_error; /* protected by t combiner */
bool received_last_frame; /* protected by t combiner */
- gpr_timespec deadline;
+ grpc_millis deadline;
/** saw some stream level error */
grpc_error *forced_close_error;
@@ -711,7 +707,7 @@ grpc_error *grpc_chttp2_flowctl_recv_data(grpc_chttp2_transport_flowctl *tfc,
// returns an announce if we should send a transport update to our peer,
// else returns zero
uint32_t grpc_chttp2_flowctl_maybe_send_transport_update(
- grpc_chttp2_transport_flowctl *tfc);
+ grpc_chttp2_transport_flowctl *tfc, bool writing_anyway);
// returns an announce if we should send a stream update to our peer, else
// returns zero
@@ -758,10 +754,8 @@ typedef struct {
// Reads the flow control data and returns and actionable struct that will tell
// chttp2 exactly what it needs to do
grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action(
- grpc_chttp2_transport_flowctl *tfc, grpc_chttp2_stream_flowctl *sfc);
-
-grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
- grpc_chttp2_transport_flowctl *tfc);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_flowctl *tfc,
+ grpc_chttp2_stream_flowctl *sfc);
// Takes in a flow control action and performs all the needed operations.
void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
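The per-type ping queues collapse into a single t->ping_queue, and the flow-control action computation now takes an exec_ctx so BDP decisions can read the cached clock. A sketch of how the single queue is promoted when a write actually carries the ping, mirroring maybe_initiate_ping in writing.cc below; the helper name is an assumption and GRPC_CHTTP2_PCL_INFLIGHT follows the existing closure-list enum:

static void promote_next_pings(grpc_chttp2_transport *t) {
  grpc_chttp2_ping_queue *pq = &t->ping_queue;
  pq->inflight_id = t->ping_ctr++; /* id echoed back by the eventual ping ack */
  grpc_closure_list_move(&pq->lists[GRPC_CHTTP2_PCL_NEXT],
                         &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
}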
diff --git a/src/core/ext/transport/chttp2/transport/parsing.cc b/src/core/ext/transport/chttp2/transport/parsing.cc
index 3db1ad4123..78886b497a 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.cc
+++ b/src/core/ext/transport/chttp2/transport/parsing.cc
@@ -359,8 +359,9 @@ static grpc_error *init_data_frame_parser(grpc_exec_ctx *exec_ctx,
s == NULL ? NULL : &s->flow_control,
t->incoming_frame_size);
grpc_chttp2_act_on_flowctl_action(
- exec_ctx, grpc_chttp2_flowctl_get_action(
- &t->flow_control, s == NULL ? NULL : &s->flow_control),
+ exec_ctx,
+ grpc_chttp2_flowctl_get_action(exec_ctx, &t->flow_control,
+ s == NULL ? NULL : &s->flow_control),
t, s);
if (err != GRPC_ERROR_NONE) {
goto error_handler;
@@ -385,7 +386,7 @@ error_handler:
t->parser_data = &s->data_parser;
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
- t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
return GRPC_ERROR_NONE;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) {
/* handle stream errors by closing the stream */
@@ -430,26 +431,27 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
}
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_TIMEOUT)) {
- gpr_timespec *cached_timeout =
- (gpr_timespec *)grpc_mdelem_get_user_data(md, free_timeout);
- gpr_timespec timeout;
+ grpc_millis *cached_timeout =
+ static_cast<grpc_millis *>(grpc_mdelem_get_user_data(md, free_timeout));
+ grpc_millis timeout;
if (cached_timeout == NULL) {
/* not already parsed: parse it now, and store the result away */
- cached_timeout = (gpr_timespec *)gpr_malloc(sizeof(gpr_timespec));
+ cached_timeout = (grpc_millis *)gpr_malloc(sizeof(grpc_millis));
if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), cached_timeout)) {
char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", val);
gpr_free(val);
- *cached_timeout = gpr_inf_future(GPR_TIMESPAN);
+ *cached_timeout = GRPC_MILLIS_INF_FUTURE;
}
timeout = *cached_timeout;
grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
} else {
timeout = *cached_timeout;
}
- grpc_chttp2_incoming_metadata_buffer_set_deadline(
- &s->metadata_buffer[0],
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), timeout));
+ if (timeout != GRPC_MILLIS_INF_FUTURE) {
+ grpc_chttp2_incoming_metadata_buffer_set_deadline(
+ &s->metadata_buffer[0], grpc_exec_ctx_now(exec_ctx) + timeout);
+ }
GRPC_MDELEM_UNREF(exec_ctx, md);
} else {
const size_t new_size = s->metadata_buffer[0].size + GRPC_MDELEM_LENGTH(md);
@@ -564,7 +566,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
- t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
/* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */
s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
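on_initial_header now caches the parsed grpc-timeout as a grpc_millis span in mdelem user data and only sets a stream deadline when that span is finite. The conversion it performs amounts to the following sketch (the helper name is illustrative):

static grpc_millis timeout_to_deadline(grpc_exec_ctx *exec_ctx,
                                       grpc_millis relative_timeout) {
  return relative_timeout == GRPC_MILLIS_INF_FUTURE
             ? GRPC_MILLIS_INF_FUTURE
             : grpc_exec_ctx_now(exec_ctx) + relative_timeout;
}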
diff --git a/src/core/ext/transport/chttp2/transport/writing.cc b/src/core/ext/transport/chttp2/transport/writing.cc
index ba3d55abb3..4134890f3f 100644
--- a/src/core/ext/transport/chttp2/transport/writing.cc
+++ b/src/core/ext/transport/chttp2/transport/writing.cc
@@ -42,18 +42,9 @@ static void finish_write_cb(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->write_cb_pool = cb;
}
-static void collapse_pings_from_into(grpc_chttp2_transport *t,
- grpc_chttp2_ping_type ping_type,
- grpc_chttp2_ping_queue *pq) {
- for (size_t i = 0; i < GRPC_CHTTP2_PCL_COUNT; i++) {
- grpc_closure_list_move(&t->ping_queues[ping_type].lists[i], &pq->lists[i]);
- }
-}
-
static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_ping_type ping_type) {
- grpc_chttp2_ping_queue *pq = &t->ping_queues[ping_type];
+ grpc_chttp2_transport *t) {
+ grpc_chttp2_ping_queue *pq = &t->ping_queue;
if (grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
/* no ping needed: wait */
return;
@@ -62,7 +53,8 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
/* ping already in-flight: wait */
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
- gpr_log(GPR_DEBUG, "Ping delayed [%p]: already pinging", t->peer_string);
+ gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: already pinging",
+ t->is_client ? "CLIENT" : "SERVER", t->peer_string);
}
return;
}
@@ -71,51 +63,38 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
/* need to receive something of substance before sending a ping again */
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
- gpr_log(GPR_DEBUG, "Ping delayed [%p]: too many recent pings: %d/%d",
- t->peer_string, t->ping_state.pings_before_data_required,
+ gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: too many recent pings: %d/%d",
+ t->is_client ? "CLIENT" : "SERVER", t->peer_string,
+ t->ping_state.pings_before_data_required,
t->ping_policy.max_pings_without_data);
}
return;
}
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec next_allowed_ping =
- gpr_time_add(t->ping_state.last_ping_sent_time,
- t->ping_policy.min_sent_ping_interval_without_data);
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ grpc_millis next_allowed_ping =
+ t->ping_state.last_ping_sent_time +
+ t->ping_policy.min_sent_ping_interval_without_data;
if (t->keepalive_permit_without_calls == 0 &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
- next_allowed_ping = gpr_time_add(t->ping_recv_state.last_ping_recv_time,
- gpr_time_from_seconds(7200, GPR_TIMESPAN));
+ next_allowed_ping =
+ t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC;
}
- /* gpr_log(GPR_DEBUG, "next_allowed_ping:%d.%09d now:%d.%09d",
- (int)next_allowed_ping.tv_sec, (int)next_allowed_ping.tv_nsec,
- (int)now.tv_sec, (int)now.tv_nsec); */
- if (gpr_time_cmp(next_allowed_ping, now) > 0) {
+ if (next_allowed_ping > now) {
/* not enough elapsed time between successive pings */
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG,
- "Ping delayed [%p]: not enough time elapsed since last ping",
- t->peer_string);
+ "%s: Ping delayed [%p]: not enough time elapsed since last ping",
+ t->is_client ? "CLIENT" : "SERVER", t->peer_string);
}
if (!t->ping_state.is_delayed_ping_timer_set) {
t->ping_state.is_delayed_ping_timer_set = true;
grpc_timer_init(exec_ctx, &t->ping_state.delayed_ping_timer,
- next_allowed_ping, &t->retry_initiate_ping_locked,
- gpr_now(GPR_CLOCK_MONOTONIC));
+ next_allowed_ping, &t->retry_initiate_ping_locked);
}
return;
}
- /* coalesce equivalent pings into this one */
- switch (ping_type) {
- case GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE:
- collapse_pings_from_into(t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, pq);
- break;
- case GRPC_CHTTP2_PING_ON_NEXT_WRITE:
- break;
- case GRPC_CHTTP2_PING_TYPE_COUNT:
- GPR_UNREACHABLE_CODE(break);
- }
- pq->inflight_id = t->ping_ctr * GRPC_CHTTP2_PING_TYPE_COUNT + ping_type;
+ pq->inflight_id = t->ping_ctr;
t->ping_ctr++;
GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INITIATE]);
grpc_closure_list_move(&pq->lists[GRPC_CHTTP2_PCL_NEXT],
@@ -126,7 +105,8 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
t->ping_state.last_ping_sent_time = now;
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
- gpr_log(GPR_DEBUG, "Ping sent [%p]: %d/%d", t->peer_string,
+ gpr_log(GPR_DEBUG, "%s: Ping sent [%p]: %d/%d",
+ t->is_client ? "CLIENT" : "SERVER", t->peer_string,
t->ping_state.pings_before_data_required,
t->ping_policy.max_pings_without_data);
}
@@ -156,6 +136,25 @@ static bool update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
return sched_any;
}
+static void report_stall(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ const char *staller) {
+ gpr_log(
+ GPR_DEBUG,
+ "%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR ":flowed=%" PRId64
+ ":peer_initwin=%d:t_win=%" PRId64 ":s_win=%d:s_delta=%" PRId64 "]",
+ t->peer_string, t, s->id, staller, s->flow_controlled_buffer.length,
+ s->flow_controlled_bytes_flowed,
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
+ t->flow_control.remote_window,
+ (uint32_t)GPR_MAX(
+ 0,
+ s->flow_control.remote_window_delta +
+ (int64_t)t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]),
+ s->flow_control.remote_window_delta);
+}
+
static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
gpr_atm count;
do {
@@ -202,6 +201,12 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx);
}
+ for (size_t i = 0; i < t->ping_ack_count; i++) {
+ grpc_slice_buffer_add(&t->outbuf,
+ grpc_chttp2_ping_create(1, t->ping_acks[i]));
+ }
+ t->ping_ack_count = 0;
+
/* simple writes are queued to qbuf, and flushed here */
grpc_slice_buffer_move_into(&t->qbuf, &t->outbuf);
GPR_ASSERT(t->qbuf.count == 0);
@@ -270,8 +275,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
s->send_initial_metadata, &hopt, &t->outbuf);
now_writing = true;
if (!t->is_client) {
- t->ping_recv_state.last_ping_recv_time =
- gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
t->ping_recv_state.ping_strikes = 0;
}
initial_metadata_writes++;
@@ -300,6 +304,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_NONE,
"send_initial_metadata_finished");
}
+
/* send any window updates */
uint32_t stream_announce = grpc_chttp2_flowctl_maybe_send_stream_update(
&t->flow_control, &s->flow_control);
@@ -308,8 +313,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
&t->outbuf, grpc_chttp2_window_update_create(s->id, stream_announce,
&s->stats.outgoing));
if (!t->is_client) {
- t->ping_recv_state.last_ping_recv_time =
- gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
t->ping_recv_state.ping_strikes = 0;
}
flow_control_writes++;
@@ -386,8 +390,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
}
}
if (!t->is_client) {
- t->ping_recv_state.last_ping_recv_time =
- gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ t->ping_recv_state.last_ping_recv_time = 0;
t->ping_recv_state.ping_strikes = 0;
}
if (is_last_frame) {
@@ -414,9 +417,11 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
}
message_writes++;
} else if (t->flow_control.remote_window == 0) {
+ report_stall(t, s, "transport");
grpc_chttp2_list_add_stalled_by_transport(t, s);
now_writing = true;
} else if (stream_remote_window == 0) {
+ report_stall(t, s, "stream");
grpc_chttp2_list_add_stalled_by_stream(t, s);
now_writing = true;
}
@@ -450,6 +455,10 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
}
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
+ if (!t->is_client) {
+ t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
+ t->ping_recv_state.ping_strikes = 0;
+ }
if (!t->is_client && !s->read_closed) {
grpc_slice_buffer_add(
&t->outbuf, grpc_chttp2_rst_stream_create(
@@ -483,30 +492,21 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
}
}
- uint32_t transport_announce =
- grpc_chttp2_flowctl_maybe_send_transport_update(&t->flow_control);
+ maybe_initiate_ping(exec_ctx, t);
+
+ uint32_t transport_announce = grpc_chttp2_flowctl_maybe_send_transport_update(
+ &t->flow_control, t->outbuf.count > 0);
if (transport_announce) {
- maybe_initiate_ping(exec_ctx, t,
- GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE);
grpc_transport_one_way_stats throwaway_stats;
grpc_slice_buffer_add(
&t->outbuf, grpc_chttp2_window_update_create(0, transport_announce,
&throwaway_stats));
if (!t->is_client) {
- t->ping_recv_state.last_ping_recv_time =
- gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
t->ping_recv_state.ping_strikes = 0;
}
}
- for (size_t i = 0; i < t->ping_ack_count; i++) {
- grpc_slice_buffer_add(&t->outbuf,
- grpc_chttp2_ping_create(1, t->ping_acks[i]));
- }
- t->ping_ack_count = 0;
-
- maybe_initiate_ping(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE);
-
GPR_TIMER_END("grpc_chttp2_begin_write", 0);
result.writing = t->outbuf.count > 0;
diff --git a/src/core/ext/transport/inproc/inproc_transport.cc b/src/core/ext/transport/inproc/inproc_transport.cc
index 31739d07dd..1001d74c22 100644
--- a/src/core/ext/transport/inproc/inproc_transport.cc
+++ b/src/core/ext/transport/inproc/inproc_transport.cc
@@ -150,7 +150,7 @@ typedef struct inproc_stream {
grpc_metadata_batch write_buffer_initial_md;
bool write_buffer_initial_md_filled;
uint32_t write_buffer_initial_md_flags;
- gpr_timespec write_buffer_deadline;
+ grpc_millis write_buffer_deadline;
slice_buffer_list write_buffer_message;
grpc_metadata_batch write_buffer_trailing_md;
bool write_buffer_trailing_md_filled;
@@ -180,7 +180,7 @@ typedef struct inproc_stream {
grpc_error *cancel_self_error;
grpc_error *cancel_other_error;
- gpr_timespec deadline;
+ grpc_millis deadline;
bool listed;
struct inproc_stream *stream_list_prev;
@@ -377,8 +377,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->cancel_self_error = GRPC_ERROR_NONE;
s->cancel_other_error = GRPC_ERROR_NONE;
s->write_buffer_cancel_error = GRPC_ERROR_NONE;
- s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
- s->write_buffer_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ s->deadline = GRPC_MILLIS_INF_FUTURE;
+ s->write_buffer_deadline = GRPC_MILLIS_INF_FUTURE;
s->stream_list_prev = NULL;
gpr_mu_lock(&t->mu->mu);
@@ -421,7 +421,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
cs->write_buffer_initial_md_flags,
&s->to_read_initial_md, &s->to_read_initial_md_flags,
&s->to_read_initial_md_filled);
- s->deadline = gpr_time_min(s->deadline, cs->write_buffer_deadline);
+ s->deadline = GPR_MIN(s->deadline, cs->write_buffer_deadline);
grpc_metadata_batch_clear(exec_ctx, &cs->write_buffer_initial_md);
cs->write_buffer_initial_md_filled = false;
}
@@ -956,10 +956,10 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
dest, destflags, destfilled);
}
if (s->t->is_client) {
- gpr_timespec *dl =
+ grpc_millis *dl =
(other == NULL) ? &s->write_buffer_deadline : &other->deadline;
- *dl = gpr_time_min(*dl, op->payload->send_initial_metadata
- .send_initial_metadata->deadline);
+ *dl = GPR_MIN(*dl, op->payload->send_initial_metadata
+ .send_initial_metadata->deadline);
s->initial_md_sent = true;
}
}
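Because grpc_millis values live on one flat integer timeline, the inproc transport can merge deadlines with GPR_MIN; GRPC_MILLIS_INF_FUTURE naturally loses to any real deadline. Illustrative only:

static grpc_millis merge_deadlines(grpc_millis a, grpc_millis b) {
  return GPR_MIN(a, b); /* GRPC_MILLIS_INF_FUTURE acts as "no deadline" */
}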
diff --git a/src/core/lib/support/backoff.cc b/src/core/lib/backoff/backoff.cc
index 6dc0df473b..fe0a751817 100644
--- a/src/core/lib/support/backoff.cc
+++ b/src/core/lib/backoff/backoff.cc
@@ -16,13 +16,14 @@
*
*/
-#include "src/core/lib/support/backoff.h"
+#include "src/core/lib/backoff/backoff.h"
#include <grpc/support/useful.h>
-void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout,
- double multiplier, double jitter,
- int64_t min_timeout_millis, int64_t max_timeout_millis) {
+void grpc_backoff_init(grpc_backoff *backoff,
+ grpc_millis initial_connect_timeout, double multiplier,
+ double jitter, grpc_millis min_timeout_millis,
+ grpc_millis max_timeout_millis) {
backoff->initial_connect_timeout = initial_connect_timeout;
backoff->multiplier = multiplier;
backoff->jitter = jitter;
@@ -31,11 +32,11 @@ void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout,
backoff->rng_state = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec;
}
-gpr_timespec gpr_backoff_begin(gpr_backoff *backoff, gpr_timespec now) {
+grpc_millis grpc_backoff_begin(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff) {
backoff->current_timeout_millis = backoff->initial_connect_timeout;
- const int64_t first_timeout =
+ const grpc_millis first_timeout =
GPR_MAX(backoff->current_timeout_millis, backoff->min_timeout_millis);
- return gpr_time_add(now, gpr_time_from_millis(first_timeout, GPR_TIMESPAN));
+ return grpc_exec_ctx_now(exec_ctx) + first_timeout;
}
/* Generate a random number between 0 and 1. */
@@ -44,11 +45,11 @@ static double generate_uniform_random_number(uint32_t *rng_state) {
return *rng_state / (double)((uint32_t)1 << 31);
}
-gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now) {
+grpc_millis grpc_backoff_step(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff) {
const double new_timeout_millis =
backoff->multiplier * (double)backoff->current_timeout_millis;
backoff->current_timeout_millis =
- GPR_MIN((int64_t)new_timeout_millis, backoff->max_timeout_millis);
+ GPR_MIN((grpc_millis)new_timeout_millis, backoff->max_timeout_millis);
const double jitter_range_width = backoff->jitter * new_timeout_millis;
const double jitter =
@@ -56,17 +57,17 @@ gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now) {
jitter_range_width;
backoff->current_timeout_millis =
- (int64_t)((double)(backoff->current_timeout_millis) + jitter);
+ (grpc_millis)((double)(backoff->current_timeout_millis) + jitter);
- const gpr_timespec current_deadline = gpr_time_add(
- now, gpr_time_from_millis(backoff->current_timeout_millis, GPR_TIMESPAN));
+ const grpc_millis current_deadline =
+ grpc_exec_ctx_now(exec_ctx) + backoff->current_timeout_millis;
- const gpr_timespec min_deadline = gpr_time_add(
- now, gpr_time_from_millis(backoff->min_timeout_millis, GPR_TIMESPAN));
+ const grpc_millis min_deadline =
+ grpc_exec_ctx_now(exec_ctx) + backoff->min_timeout_millis;
- return gpr_time_max(current_deadline, min_deadline);
+ return GPR_MAX(current_deadline, min_deadline);
}
-void gpr_backoff_reset(gpr_backoff *backoff) {
+void grpc_backoff_reset(grpc_backoff *backoff) {
backoff->current_timeout_millis = backoff->initial_connect_timeout;
}
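The renamed backoff API hands back absolute grpc_millis deadlines computed against the exec_ctx clock rather than adding spans to a caller-supplied now. A usage sketch with illustrative constants:

static grpc_millis schedule_first_retry(grpc_exec_ctx *exec_ctx,
                                        grpc_backoff *backoff) {
  grpc_backoff_init(backoff, 1000 /* initial connect timeout (ms) */,
                    1.6 /* multiplier */, 0.2 /* jitter */,
                    100 /* min timeout (ms) */, 120000 /* max timeout (ms) */);
  return grpc_backoff_begin(exec_ctx, backoff); /* absolute deadline */
}

On each later failure the caller would take grpc_backoff_step(exec_ctx, backoff) as the next deadline, and call grpc_backoff_reset(backoff) once a connection succeeds.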
diff --git a/src/core/lib/support/backoff.h b/src/core/lib/backoff/backoff.h
index 31ec28f666..80e49ea52a 100644
--- a/src/core/lib/support/backoff.h
+++ b/src/core/lib/backoff/backoff.h
@@ -16,10 +16,10 @@
*
*/
-#ifndef GRPC_CORE_LIB_SUPPORT_BACKOFF_H
-#define GRPC_CORE_LIB_SUPPORT_BACKOFF_H
+#ifndef GRPC_CORE_LIB_BACKOFF_BACKOFF_H
+#define GRPC_CORE_LIB_BACKOFF_BACKOFF_H
-#include <grpc/support/time.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
#ifdef __cplusplus
extern "C" {
@@ -27,38 +27,40 @@ extern "C" {
typedef struct {
/// const: how long to wait after the first failure before retrying
- int64_t initial_connect_timeout;
+ grpc_millis initial_connect_timeout;
/// const: factor with which to multiply backoff after a failed retry
double multiplier;
/// const: amount to randomize backoffs
double jitter;
/// const: minimum time between retries in milliseconds
- int64_t min_timeout_millis;
+ grpc_millis min_timeout_millis;
/// const: maximum time between retries in milliseconds
- int64_t max_timeout_millis;
+ grpc_millis max_timeout_millis;
/// random number generator
uint32_t rng_state;
/// current retry timeout in milliseconds
- int64_t current_timeout_millis;
-} gpr_backoff;
+ grpc_millis current_timeout_millis;
+} grpc_backoff;
/// Initialize backoff machinery - does not need to be destroyed
-void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout,
- double multiplier, double jitter,
- int64_t min_timeout_millis, int64_t max_timeout_millis);
+void grpc_backoff_init(grpc_backoff *backoff,
+ grpc_millis initial_connect_timeout, double multiplier,
+ double jitter, grpc_millis min_timeout_millis,
+ grpc_millis max_timeout_millis);
/// Begin retry loop: returns a timespec for the NEXT retry
-gpr_timespec gpr_backoff_begin(gpr_backoff *backoff, gpr_timespec now);
+grpc_millis grpc_backoff_begin(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff);
/// Step a retry loop: returns a timespec for the NEXT retry
-gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now);
-/// Reset the backoff, so the next gpr_backoff_step will be a gpr_backoff_begin
+grpc_millis grpc_backoff_step(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff);
+/// Reset the backoff, so the next grpc_backoff_step will be a
+/// grpc_backoff_begin
/// instead
-void gpr_backoff_reset(gpr_backoff *backoff);
+void grpc_backoff_reset(grpc_backoff *backoff);
#ifdef __cplusplus
}
#endif
-#endif /* GRPC_CORE_LIB_SUPPORT_BACKOFF_H */
+#endif /* GRPC_CORE_LIB_BACKOFF_BACKOFF_H */
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index f0de80f0c0..5c00c09889 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -70,7 +70,7 @@ typedef struct {
grpc_call_context_element *context;
grpc_slice path;
gpr_timespec start_time;
- gpr_timespec deadline;
+ grpc_millis deadline;
gpr_arena *arena;
grpc_call_combiner *call_combiner;
} grpc_call_element_args;
diff --git a/src/core/lib/channel/handshaker.cc b/src/core/lib/channel/handshaker.cc
index 1753da5721..b27ee37e5b 100644
--- a/src/core/lib/channel/handshaker.cc
+++ b/src/core/lib/channel/handshaker.cc
@@ -232,7 +232,7 @@ static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
void grpc_handshake_manager_do_handshake(
grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr,
grpc_endpoint* endpoint, const grpc_channel_args* channel_args,
- gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
+ grpc_millis deadline, grpc_tcp_server_acceptor* acceptor,
grpc_iomgr_cb_func on_handshake_done, void* user_data) {
gpr_mu_lock(&mgr->mu);
GPR_ASSERT(mgr->index == 0);
@@ -255,9 +255,7 @@ void grpc_handshake_manager_do_handshake(
gpr_ref(&mgr->refs);
GRPC_CLOSURE_INIT(&mgr->on_timeout, on_timeout, mgr,
grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &mgr->deadline_timer,
- gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
- &mgr->on_timeout, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &mgr->deadline_timer, deadline, &mgr->on_timeout);
// Start first handshaker, which also owns a ref.
gpr_ref(&mgr->refs);
bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_NONE);
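grpc_timer_init drops both the clock conversion and the explicit now argument, since grpc_millis deadlines already live on the timer's timeline. A sketch of arming a handshake timeout under the new signature, assuming mgr->on_timeout was initialized as above (the helper name is illustrative):

static void arm_handshake_timeout(grpc_exec_ctx *exec_ctx,
                                  grpc_handshake_manager *mgr,
                                  grpc_millis timeout_ms) {
  grpc_timer_init(exec_ctx, &mgr->deadline_timer,
                  grpc_exec_ctx_now(exec_ctx) + timeout_ms, &mgr->on_timeout);
}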
diff --git a/src/core/lib/channel/handshaker.h b/src/core/lib/channel/handshaker.h
index a857cde791..51ee56af43 100644
--- a/src/core/lib/channel/handshaker.h
+++ b/src/core/lib/channel/handshaker.h
@@ -149,7 +149,7 @@ void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx,
void grpc_handshake_manager_do_handshake(
grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr,
grpc_endpoint* endpoint, const grpc_channel_args* channel_args,
- gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
+ grpc_millis deadline, grpc_tcp_server_acceptor* acceptor,
grpc_iomgr_cb_func on_handshake_done, void* user_data);
/// Add \a mgr to the server side list of all pending handshake managers, the
diff --git a/src/core/lib/debug/stats_data.cc b/src/core/lib/debug/stats_data.cc
index c0aec63c1d..5bd7884e28 100644
--- a/src/core/lib/debug/stats_data.cc
+++ b/src/core/lib/debug/stats_data.cc
@@ -77,6 +77,7 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"http2_initiate_write_due_to_transport_flow_control_unstalled",
"http2_initiate_write_due_to_ping_response",
"http2_initiate_write_due_to_force_rst_stream",
+ "http2_spurious_writes_begun",
"hpack_recv_indexed",
"hpack_recv_lithdr_incidx",
"hpack_recv_lithdr_incidx_v",
@@ -177,6 +178,7 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"'transport_flow_control_unstalled'",
"Number of HTTP2 writes initiated due to 'ping_response'",
"Number of HTTP2 writes initiated due to 'force_rst_stream'",
+ "Number of HTTP2 writes initiated with nothing to write",
"Number of HPACK indexed fields received",
"Number of HPACK literal headers received with incremental indexing",
"Number of HPACK literal headers received with incremental indexing and "
diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h
index cf5bafbd04..d8e4e7d264 100644
--- a/src/core/lib/debug/stats_data.h
+++ b/src/core/lib/debug/stats_data.h
@@ -83,6 +83,7 @@ typedef enum {
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM,
+ GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN,
GRPC_STATS_COUNTER_HPACK_RECV_INDEXED,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V,
@@ -330,6 +331,9 @@ typedef enum {
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM)
+#define GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN)
#define GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_INDEXED)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx) \
diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml
index b5c15ff55c..5c0ab2262e 100644
--- a/src/core/lib/debug/stats_data.yaml
+++ b/src/core/lib/debug/stats_data.yaml
@@ -189,6 +189,8 @@
doc: Number of HTTP2 writes initiated due to 'ping_response'
- counter: http2_initiate_write_due_to_force_rst_stream
doc: Number of HTTP2 writes initiated due to 'force_rst_stream'
+- counter: http2_spurious_writes_begun
+ doc: Number of HTTP2 writes initiated with nothing to write
- counter: hpack_recv_indexed
doc: Number of HPACK indexed fields received
- counter: hpack_recv_lithdr_incidx
@@ -270,3 +272,4 @@
- counter: server_slowpath_requests_queued
doc: How many times was the server slow path taken (indicates too few
outstanding requests)
+
diff --git a/src/core/lib/debug/stats_data_bq_schema.sql b/src/core/lib/debug/stats_data_bq_schema.sql
index f96e40c00e..54869977b0 100644
--- a/src/core/lib/debug/stats_data_bq_schema.sql
+++ b/src/core/lib/debug/stats_data_bq_schema.sql
@@ -52,6 +52,7 @@ http2_initiate_write_due_to_keepalive_ping_per_iteration:FLOAT,
http2_initiate_write_due_to_transport_flow_control_unstalled_per_iteration:FLOAT,
http2_initiate_write_due_to_ping_response_per_iteration:FLOAT,
http2_initiate_write_due_to_force_rst_stream_per_iteration:FLOAT,
+http2_spurious_writes_begun_per_iteration:FLOAT,
hpack_recv_indexed_per_iteration:FLOAT,
hpack_recv_lithdr_incidx_per_iteration:FLOAT,
hpack_recv_lithdr_incidx_v_per_iteration:FLOAT,
diff --git a/src/core/lib/http/httpcli.cc b/src/core/lib/http/httpcli.cc
index db995943a9..c96800b85c 100644
--- a/src/core/lib/http/httpcli.cc
+++ b/src/core/lib/http/httpcli.cc
@@ -44,7 +44,7 @@ typedef struct {
grpc_endpoint *ep;
char *host;
char *ssl_host_override;
- gpr_timespec deadline;
+ grpc_millis deadline;
int have_read_byte;
const grpc_httpcli_handshaker *handshaker;
grpc_closure *on_done;
@@ -65,7 +65,7 @@ static grpc_httpcli_post_override g_post_override = NULL;
static void plaintext_handshake(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *endpoint, const char *host,
- gpr_timespec deadline,
+ grpc_millis deadline,
void (*on_done)(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_endpoint *endpoint)) {
@@ -240,7 +240,7 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
grpc_polling_entity *pollent,
grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_done,
+ grpc_millis deadline, grpc_closure *on_done,
grpc_httpcli_response *response,
const char *name, grpc_slice request_text) {
internal_request *req =
@@ -278,9 +278,8 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
grpc_resource_quota *resource_quota,
- const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_done,
- grpc_httpcli_response *response) {
+ const grpc_httpcli_request *request, grpc_millis deadline,
+ grpc_closure *on_done, grpc_httpcli_response *response) {
char *name;
if (g_get_override &&
g_get_override(exec_ctx, request, deadline, on_done, response)) {
@@ -298,7 +297,7 @@ void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
- gpr_timespec deadline, grpc_closure *on_done,
+ grpc_millis deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
char *name;
if (g_post_override &&
diff --git a/src/core/lib/http/httpcli.h b/src/core/lib/http/httpcli.h
index 630481da54..3e6bdc0e46 100644
--- a/src/core/lib/http/httpcli.h
+++ b/src/core/lib/http/httpcli.h
@@ -46,7 +46,7 @@ typedef struct grpc_httpcli_context {
typedef struct {
const char *default_port;
void (*handshake)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint,
- const char *host, gpr_timespec deadline,
+ const char *host, grpc_millis deadline,
void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *endpoint));
} grpc_httpcli_handshaker;
@@ -87,8 +87,8 @@ void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx,
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
grpc_resource_quota *resource_quota,
- const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_complete,
+ const grpc_httpcli_request *request, grpc_millis deadline,
+ grpc_closure *on_complete,
grpc_httpcli_response *response);
/* Asynchronously perform a HTTP POST.
@@ -110,18 +110,18 @@ void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
- gpr_timespec deadline, grpc_closure *on_complete,
+ grpc_millis deadline, grpc_closure *on_complete,
grpc_httpcli_response *response);
/* override functions return 1 if they handled the request, 0 otherwise */
typedef int (*grpc_httpcli_get_override)(grpc_exec_ctx *exec_ctx,
const grpc_httpcli_request *request,
- gpr_timespec deadline,
+ grpc_millis deadline,
grpc_closure *on_complete,
grpc_httpcli_response *response);
typedef int (*grpc_httpcli_post_override)(
grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
- const char *body_bytes, size_t body_size, gpr_timespec deadline,
+ const char *body_bytes, size_t body_size, grpc_millis deadline,
grpc_closure *on_complete, grpc_httpcli_response *response);
void grpc_httpcli_set_override(grpc_httpcli_get_override get,
diff --git a/src/core/lib/http/httpcli_security_connector.cc b/src/core/lib/http/httpcli_security_connector.cc
index 8a0f225ba2..ef6c4a509b 100644
--- a/src/core/lib/http/httpcli_security_connector.cc
+++ b/src/core/lib/http/httpcli_security_connector.cc
@@ -157,7 +157,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *tcp, const char *host,
- gpr_timespec deadline,
+ grpc_millis deadline,
void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *endpoint)) {
on_done_closure *c = (on_done_closure *)gpr_malloc(sizeof(*c));
diff --git a/src/core/lib/support/block_annotate.h b/src/core/lib/iomgr/block_annotate.h
index 8e3ef7df65..cbcb5d92f0 100644
--- a/src/core/lib/support/block_annotate.h
+++ b/src/core/lib/iomgr/block_annotate.h
@@ -16,8 +16,8 @@
*
*/
-#ifndef GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H
-#define GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H
+#ifndef GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H
+#define GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H
#ifdef __cplusplus
extern "C" {
@@ -47,9 +47,13 @@ void gpr_thd_end_blocking_region();
#define GRPC_SCHEDULING_START_BLOCKING_REGION \
do { \
} while (0)
-#define GRPC_SCHEDULING_END_BLOCKING_REGION \
- do { \
+#define GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX \
+ do { \
+ } while (0)
+#define GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(ec) \
+ do { \
+ grpc_exec_ctx_invalidate_now((ec)); \
} while (0)
#endif
-#endif /* GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H */
+#endif /* GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H */
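The blocking-region macros gain a variant that also invalidates the exec_ctx's cached clock, since time spent blocked makes the cached now stale. The intended usage pattern looks roughly like this sketch around a stand-in syscall (requires <poll.h>; everything other than the macros is illustrative):

static int blocking_poll(grpc_exec_ctx *exec_ctx, struct pollfd *pfds,
                         nfds_t nfds, int timeout_ms) {
  GRPC_SCHEDULING_START_BLOCKING_REGION;
  int r = poll(pfds, nfds, timeout_ms); /* the thread may sleep here */
  /* marks the cached now as stale so the next grpc_exec_ctx_now re-reads it */
  GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
  return r;
}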
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index 689aac15bf..6126e2771c 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -24,6 +24,7 @@
#include <assert.h>
#include <errno.h>
+#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
@@ -39,12 +40,12 @@
#include <grpc/support/useful.h>
#include "src/core/lib/debug/stats.h"
+#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/string.h"
static grpc_wakeup_fd global_wakeup_fd;
@@ -561,25 +562,17 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_TIMER_END("pollset_shutdown", 0);
}
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
- gpr_timespec timeout;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
- return -1;
- }
-
- if (gpr_time_cmp(deadline, now) <= 0) {
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+ grpc_millis millis) {
+ if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
+ grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
+ if (delta > INT_MAX) {
+ return INT_MAX;
+ } else if (delta < 0) {
return 0;
+ } else {
+ return (int)delta;
}
-
- static const gpr_timespec round_up = {
- 0, /* tv_sec */
- GPR_NS_PER_MS - 1, /* tv_nsec */
- GPR_TIMESPAN /* clock_type */
- };
- timeout = gpr_time_sub(deadline, now);
- int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
- return millis >= 1 ? millis : 1;
}
/* Process the epoll events found by do_epoll_wait() function.
@@ -636,11 +629,11 @@ static grpc_error *process_epoll_events(grpc_exec_ctx *exec_ctx,
(i.e the designated poller thread) will be calling this function. So there is
no need for any synchronization when accesing fields in g_epoll_set */
static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
- gpr_timespec now, gpr_timespec deadline) {
+ grpc_millis deadline) {
GPR_TIMER_BEGIN("do_epoll_wait", 0);
int r;
- int timeout = poll_deadline_to_millis_timeout(deadline, now);
+ int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
if (timeout != 0) {
GRPC_SCHEDULING_START_BLOCKING_REGION;
}
@@ -650,7 +643,7 @@ static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
timeout);
} while (r < 0 && errno == EINTR);
if (timeout != 0) {
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
}
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
@@ -668,9 +661,10 @@ static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
return GRPC_ERROR_NONE;
}
-static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
- grpc_pollset_worker **worker_hdl, gpr_timespec *now,
- gpr_timespec deadline) {
+static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *worker,
+ grpc_pollset_worker **worker_hdl,
+ grpc_millis deadline) {
GPR_TIMER_BEGIN("begin_worker", 0);
if (worker_hdl != NULL) *worker_hdl = worker;
worker->initialized_cv = false;
@@ -755,14 +749,15 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
pollset->shutting_down);
}
- if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) &&
+ if (gpr_cv_wait(&worker->cv, &pollset->mu,
+ grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME)) &&
worker->state == UNKICKED) {
/* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
received a kick */
SET_KICK_STATE(worker, KICKED);
}
}
- *now = gpr_now(now->clock_type);
+ grpc_exec_ctx_invalidate_now(exec_ctx);
}
if (GRPC_TRACER_ON(grpc_polling_trace)) {
@@ -941,7 +936,7 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
ensure that it is held by the time the function returns */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_pollset_worker worker;
grpc_error *error = GRPC_ERROR_NONE;
static const char *err_desc = "pollset_work";
@@ -952,7 +947,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
return GRPC_ERROR_NONE;
}
- if (begin_worker(ps, &worker, worker_hdl, &now, deadline)) {
+ if (begin_worker(exec_ctx, ps, &worker, worker_hdl, deadline)) {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
GPR_ASSERT(!ps->shutting_down);
@@ -975,8 +970,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
designated poller */
if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
gpr_atm_acq_load(&g_epoll_set.num_events)) {
- append_error(&error, do_epoll_wait(exec_ctx, ps, now, deadline),
- err_desc);
+ append_error(&error, do_epoll_wait(exec_ctx, ps, deadline), err_desc);
}
append_error(&error, process_epoll_events(exec_ctx, ps), err_desc);
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index bed47e4388..59dd8fd2fe 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -25,6 +25,7 @@
#include <assert.h>
#include <errno.h>
+#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
@@ -38,7 +39,7 @@
#include <grpc/support/useful.h>
#include "src/core/lib/debug/stats.h"
-#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/is_epollexclusive_available.h"
#include "src/core/lib/iomgr/lockfree_event.h"
@@ -46,19 +47,18 @@
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/spinlock.h"
/*******************************************************************************
* Polling object
*/
-
typedef enum {
PO_POLLING_GROUP,
PO_POLLSET_SET,
PO_POLLSET,
- PO_FD, /* ordering is important: we always want to lock pollsets before fds:
- this guarantees that using an fd as a pollable is safe */
+ PO_FD,
+ /* ordering is important: we always want to lock pollsets before fds:
+ this guarantees that using an fd as a pollable is safe */
PO_EMPTY_POLLABLE,
PO_COUNT
} polling_obj_type;
@@ -690,32 +690,16 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
*mu = &pollset->pollable_obj.po.mu;
}
-/* Convert a timespec to milliseconds:
- - Very small or negative poll times are clamped to zero to do a non-blocking
- poll (which becomes spin polling)
- - Other small values are rounded up to one millisecond
- - Longer than a millisecond polls are rounded up to the next nearest
- millisecond to avoid spinning
- - Infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
- gpr_timespec timeout;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
- return -1;
- }
-
- if (gpr_time_cmp(deadline, now) <= 0) {
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+ grpc_millis millis) {
+ if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
+ grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
+ if (delta > INT_MAX)
+ return INT_MAX;
+ else if (delta < 0)
return 0;
- }
-
- static const gpr_timespec round_up = {
- 0, /* tv_sec */
- GPR_NS_PER_MS - 1, /* tv_nsec */
- GPR_TIMESPAN /* clock_type */
- };
- timeout = gpr_time_sub(deadline, now);
- int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
- return millis >= 1 ? millis : 1;
+ else
+ return (int)delta;
}
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
@@ -810,9 +794,8 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
}
static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- pollable *p, gpr_timespec now,
- gpr_timespec deadline) {
- int timeout = poll_deadline_to_millis_timeout(deadline, now);
+ pollable *p, grpc_millis deadline) {
+ int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
char *desc = pollable_desc(p);
@@ -829,7 +812,7 @@ static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
r = epoll_wait(p->epfd, pollset->events, MAX_EPOLL_EVENTS, timeout);
} while (r < 0 && errno == EINTR);
if (timeout != 0) {
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
}
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
@@ -884,9 +867,10 @@ static worker_remove_result worker_remove(grpc_pollset_worker **root,
}
/* Return true if this thread should poll */
-static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
- grpc_pollset_worker **worker_hdl, gpr_timespec *now,
- gpr_timespec deadline) {
+static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *worker,
+ grpc_pollset_worker **worker_hdl,
+ grpc_millis deadline) {
bool do_poll = true;
if (worker_hdl != NULL) *worker_hdl = worker;
worker->initialized_cv = false;
@@ -910,10 +894,11 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
worker->pollable_obj, worker,
- poll_deadline_to_millis_timeout(deadline, *now));
+ poll_deadline_to_millis_timeout(exec_ctx, deadline));
}
while (do_poll && worker->pollable_obj->root_worker != worker) {
- if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->po.mu, deadline)) {
+ if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->po.mu,
+ grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
worker->pollable_obj, worker);
@@ -936,7 +921,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_mu_lock(&pollset->pollable_obj.po.mu);
gpr_mu_lock(&worker->pollable_obj->po.mu);
}
- *now = gpr_now(now->clock_type);
+ grpc_exec_ctx_invalidate_now(exec_ctx);
}
return do_poll && pollset->shutdown_closure == NULL &&
@@ -967,14 +952,13 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
ensure that it is held by the time the function returns */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_pollset_worker worker;
if (0 && GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRId64
- ".%09d deadline=%" PRId64 ".%09d kwp=%d root_worker=%p",
- pollset, worker_hdl, &worker, now.tv_sec, now.tv_nsec,
- deadline.tv_sec, deadline.tv_nsec, pollset->kicked_without_poller,
- pollset->root_worker);
+ gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRIdPTR
+ " deadline=%" PRIdPTR " kwp=%d root_worker=%p",
+ pollset, worker_hdl, &worker, grpc_exec_ctx_now(exec_ctx), deadline,
+ pollset->kicked_without_poller, pollset->root_worker);
}
grpc_error *error = GRPC_ERROR_NONE;
static const char *err_desc = "pollset_work";
@@ -985,7 +969,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (pollset->current_pollable_obj != &pollset->pollable_obj) {
gpr_mu_lock(&pollset->current_pollable_obj->po.mu);
}
- if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
+ if (begin_worker(exec_ctx, pollset, &worker, worker_hdl, deadline)) {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
GPR_ASSERT(!pollset->shutdown_closure);
@@ -996,7 +980,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_mu_unlock(&pollset->pollable_obj.po.mu);
if (pollset->event_cursor == pollset->event_count) {
append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable_obj,
- now, deadline),
+ deadline),
err_desc);
}
append_error(&error, pollset_process_events(exec_ctx, pollset, false),
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc
index c8e07c6e18..370ea1d50b 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.cc
+++ b/src/core/lib/iomgr/ev_epollsig_linux.cc
@@ -25,6 +25,7 @@
#include <assert.h>
#include <errno.h>
+#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
@@ -40,13 +41,13 @@
#include <grpc/support/useful.h>
#include "src/core/lib/debug/stats.h"
+#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
@@ -1089,30 +1090,16 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
pollset->shutdown_done = NULL;
}
-/* Convert a timespec to milliseconds:
- - Very small or negative poll times are clamped to zero to do a non-blocking
- poll (which becomes spin polling)
- - Other small values are rounded up to one millisecond
- - Longer than a millisecond polls are rounded up to the next nearest
- millisecond to avoid spinning
- - Infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
- gpr_timespec timeout;
- static const int64_t max_spin_polling_us = 10;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
- return -1;
- }
-
- if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
- max_spin_polling_us,
- GPR_TIMESPAN))) <= 0) {
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+ grpc_millis millis) {
+ if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
+ grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
+ if (delta > INT_MAX)
+ return INT_MAX;
+ else if (delta < 0)
return 0;
- }
- timeout = gpr_time_sub(deadline, now);
- int millis = gpr_time_to_millis(gpr_time_add(
- timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
- return millis >= 1 ? millis : 1;
+ else
+ return (int)delta;
}
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
@@ -1243,7 +1230,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
ep_rv =
epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
if (ep_rv < 0) {
if (errno != EINTR) {
gpr_asprintf(&err_msg,
@@ -1310,10 +1297,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
ensure that it is held by the time the function returns */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
+ grpc_millis deadline) {
GPR_TIMER_BEGIN("pollset_work", 0);
grpc_error *error = GRPC_ERROR_NONE;
- int timeout_ms = poll_deadline_to_millis_timeout(deadline, now);
+ int timeout_ms = poll_deadline_to_millis_timeout(exec_ctx, deadline);
sigset_t new_mask;
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index e170702dca..036a35690c 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -24,6 +24,7 @@
#include <assert.h>
#include <errno.h>
+#include <limits.h>
#include <poll.h>
#include <string.h>
#include <sys/socket.h>
@@ -37,12 +38,11 @@
#include <grpc/support/useful.h>
#include "src/core/lib/debug/stats.h"
+#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
-#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_cv.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/murmur_hash.h"
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
@@ -50,7 +50,6 @@
/*******************************************************************************
* FD declarations
*/
-
typedef struct grpc_fd_watcher {
struct grpc_fd_watcher *next;
struct grpc_fd_watcher *prev;
@@ -200,8 +199,8 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
- longer than a millisecond polls are rounded up to the next nearest
millisecond to avoid spinning
- infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now);
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+ grpc_millis deadline);
/* Allow kick to wakeup the currently polling worker */
#define GRPC_POLLSET_CAN_KICK_SELF 1
@@ -876,7 +875,7 @@ static void work_combine_error(grpc_error **composite, grpc_error *error) {
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_pollset_worker worker;
if (worker_hdl) *worker_hdl = &worker;
grpc_error *error = GRPC_ERROR_NONE;
@@ -945,7 +944,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd_watcher *watchers;
struct pollfd *pfds;
- timeout = poll_deadline_to_millis_timeout(deadline, now);
+ timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
if (pollset->fd_count + 2 <= inline_elements) {
pfds = pollfd_space;
@@ -991,7 +990,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GRPC_SCHEDULING_START_BLOCKING_REGION;
GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
r = grpc_poll_function(pfds, pfd_count, timeout);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r);
@@ -1068,13 +1067,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (queued_work || worker.kicked_specifically) {
/* If there's queued work on the list, then set the deadline to be
immediate so we get back out of the polling loop quickly */
- deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ deadline = 0;
}
keep_polling = 1;
}
- if (keep_polling) {
- now = gpr_now(now.clock_type);
- }
}
gpr_tls_set(&g_current_thread_poller, 0);
if (added_worker) {
@@ -1126,21 +1122,14 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
}
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
- gpr_timespec timeout;
- static const int64_t max_spin_polling_us = 10;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
- return -1;
- }
- if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
- max_spin_polling_us,
- GPR_TIMESPAN))) <= 0) {
- return 0;
- }
- timeout = gpr_time_sub(deadline, now);
- return gpr_time_to_millis(gpr_time_add(
- timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+ grpc_millis deadline) {
+ if (deadline == GRPC_MILLIS_INF_FUTURE) return -1;
+ if (deadline == 0) return 0;
+ grpc_millis n = deadline - grpc_exec_ctx_now(exec_ctx);
+ if (n < 0) return 0;
+ if (n > INT_MAX) return -1;
+ return (int)n;
}
/*******************************************************************************
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index 4d3ae2228e..e4033fab1d 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -205,9 +205,9 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
}
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker, gpr_timespec now,
- gpr_timespec deadline) {
- return g_event_engine->pollset_work(exec_ctx, pollset, worker, now, deadline);
+ grpc_pollset_worker **worker,
+ grpc_millis deadline) {
+ return g_event_engine->pollset_work(exec_ctx, pollset, worker, deadline);
}
grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 5ad1c13ee6..955326c5f7 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -56,8 +56,8 @@ typedef struct grpc_event_engine_vtable {
grpc_closure *closure);
void (*pollset_destroy)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset);
grpc_error *(*pollset_work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker, gpr_timespec now,
- gpr_timespec deadline);
+ grpc_pollset_worker **worker,
+ grpc_millis deadline);
grpc_error *(*pollset_kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker);
void (*pollset_add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
diff --git a/src/core/lib/iomgr/exec_ctx.cc b/src/core/lib/iomgr/exec_ctx.cc
index 41c69add17..3d17afcb8f 100644
--- a/src/core/lib/iomgr/exec_ctx.cc
+++ b/src/core/lib/iomgr/exec_ctx.cc
@@ -104,9 +104,69 @@ static void exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
}
-void grpc_exec_ctx_global_init(void) {}
+static gpr_timespec
+ g_start_time[GPR_TIMESPAN + 1]; // assumes GPR_TIMESPAN is the
+ // last enum value in
+ // gpr_clock_type
+
+void grpc_exec_ctx_global_init(void) {
+ for (int i = 0; i < GPR_TIMESPAN; i++) {
+ g_start_time[i] = gpr_now((gpr_clock_type)i);
+ }
+ // allows uniform treatment in conversion functions
+ g_start_time[GPR_TIMESPAN] = gpr_time_0(GPR_TIMESPAN);
+}
+
void grpc_exec_ctx_global_shutdown(void) {}
+static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) {
+ ts = gpr_time_sub(ts, g_start_time[ts.clock_type]);
+ double x =
+ GPR_MS_PER_SEC * (double)ts.tv_sec + (double)ts.tv_nsec / GPR_NS_PER_MS;
+ if (x < 0) return 0;
+ if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
+ return (gpr_atm)x;
+}
+
+static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) {
+ ts = gpr_time_sub(ts, g_start_time[ts.clock_type]);
+ double x = GPR_MS_PER_SEC * (double)ts.tv_sec +
+ (double)ts.tv_nsec / GPR_NS_PER_MS +
+ (double)(GPR_NS_PER_SEC - 1) / (double)GPR_NS_PER_SEC;
+ if (x < 0) return 0;
+ if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
+ return (gpr_atm)x;
+}
+
+grpc_millis grpc_exec_ctx_now(grpc_exec_ctx *exec_ctx) {
+ if (!exec_ctx->now_is_valid) {
+ exec_ctx->now = timespec_to_atm_round_down(gpr_now(GPR_CLOCK_MONOTONIC));
+ exec_ctx->now_is_valid = true;
+ }
+ return exec_ctx->now;
+}
+
+void grpc_exec_ctx_invalidate_now(grpc_exec_ctx *exec_ctx) {
+ exec_ctx->now_is_valid = false;
+}
+
+gpr_timespec grpc_millis_to_timespec(grpc_millis millis,
+ gpr_clock_type clock_type) {
+ if (clock_type == GPR_TIMESPAN) {
+ return gpr_time_from_millis(millis, GPR_TIMESPAN);
+ }
+ return gpr_time_add(g_start_time[clock_type],
+ gpr_time_from_millis(millis, GPR_TIMESPAN));
+}
+
+grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec ts) {
+ return timespec_to_atm_round_down(ts);
+}
+
+grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec ts) {
+ return timespec_to_atm_round_up(ts);
+}
+
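
A worked example of the two rounding modes added above, in freestanding C with illustrative names and numbers: times are stored as milliseconds relative to a recorded start time so they fit in a gpr_atm; alarm.cc later in this patch converts a user-supplied deadline with the round-up variant (so a timer cannot fire before the instant asked for), while grpc_exec_ctx_now rounds down.

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { int64_t tv_sec; int32_t tv_nsec; } rel_ts; /* already offset by the start time */

    static int64_t to_millis_round_down(rel_ts t) {
      return t.tv_sec * 1000 + t.tv_nsec / 1000000;
    }

    static int64_t to_millis_round_up(rel_ts t) {
      return t.tv_sec * 1000 + (t.tv_nsec + 999999) / 1000000;
    }

    int main(void) {
      rel_ts t = {2, 500000400}; /* 2.5000004s after the recorded start */
      printf("down=%lld up=%lld\n", (long long)to_millis_round_down(t),
             (long long)to_millis_round_up(t)); /* down=2500 up=2501 */
      return 0;
    }
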
static const grpc_closure_scheduler_vtable exec_ctx_scheduler_vtable = {
exec_ctx_run, exec_ctx_sched, "exec_ctx"};
static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable};
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index a93728f0a6..44b9be7aa9 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -19,14 +19,19 @@
#ifndef GRPC_CORE_LIB_IOMGR_EXEC_CTX_H
#define GRPC_CORE_LIB_IOMGR_EXEC_CTX_H
+#include <grpc/support/atm.h>
#include <grpc/support/cpu.h>
+
#include "src/core/lib/iomgr/closure.h"
#ifdef __cplusplus
extern "C" {
#endif
-/* #define GRPC_EXECUTION_CONTEXT_SANITIZER 1 */
+typedef gpr_atm grpc_millis;
+
+#define GRPC_MILLIS_INF_FUTURE GPR_ATM_MAX
+#define GRPC_MILLIS_INF_PAST GPR_ATM_MIN
/** A workqueue represents a list of work to be executed asynchronously.
Forward declared here to avoid a circular dependency with workqueue.h. */
@@ -70,6 +75,9 @@ struct grpc_exec_ctx {
unsigned starting_cpu;
void *check_ready_to_finish_arg;
bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
+
+ bool now_is_valid;
+ grpc_millis now;
};
/* initializer for grpc_exec_ctx:
@@ -77,7 +85,7 @@ struct grpc_exec_ctx {
#define GRPC_EXEC_CTX_INITIALIZER(flags, finish_check, finish_check_arg) \
{ \
GRPC_CLOSURE_LIST_INIT, NULL, NULL, flags, gpr_cpu_current_cpu(), \
- finish_check_arg, finish_check \
+ finish_check_arg, finish_check, false, 0 \
}
/* initialize an execution context at the top level of an API call into grpc
@@ -110,6 +118,12 @@ void grpc_exec_ctx_global_init(void);
void grpc_exec_ctx_global_init(void);
void grpc_exec_ctx_global_shutdown(void);
+grpc_millis grpc_exec_ctx_now(grpc_exec_ctx *exec_ctx);
+void grpc_exec_ctx_invalidate_now(grpc_exec_ctx *exec_ctx);
+gpr_timespec grpc_millis_to_timespec(grpc_millis millis, gpr_clock_type clock);
+grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec timespec);
+grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec timespec);
+
#ifdef __cplusplus
}
#endif
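
The grpc_millis/cached-now machinery declared above can be modelled in a few lines of freestanding C (toy_exec_ctx, toy_now and friends are illustrative names only): the first query per execution context reads the OS clock, later queries reuse the cached value, and long-running code invalidates the cache explicitly, as executor.cc does below.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    typedef struct {
      bool now_is_valid;
      int64_t now; /* ms since an arbitrary start */
    } toy_exec_ctx;

    static int64_t os_clock_ms(void) {
      struct timespec t;
      clock_gettime(CLOCK_MONOTONIC, &t);
      return (int64_t)t.tv_sec * 1000 + t.tv_nsec / 1000000;
    }

    static int64_t toy_now(toy_exec_ctx *c) {
      if (!c->now_is_valid) { /* lazy: at most one clock read until invalidated */
        c->now = os_clock_ms();
        c->now_is_valid = true;
      }
      return c->now;
    }

    static void toy_invalidate_now(toy_exec_ctx *c) { c->now_is_valid = false; }

    int main(void) {
      toy_exec_ctx c = {false, 0};
      printf("%lld\n", (long long)toy_now(&c)); /* reads the clock */
      printf("%lld\n", (long long)toy_now(&c)); /* cached: same value */
      toy_invalidate_now(&c);                   /* e.g. after running closures */
      printf("%lld\n", (long long)toy_now(&c)); /* reads the clock again */
      return 0;
    }
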
diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc
index ebe7f240b4..92c3e70301 100644
--- a/src/core/lib/iomgr/executor.cc
+++ b/src/core/lib/iomgr/executor.cc
@@ -178,6 +178,7 @@ static void executor_thread(void *arg) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
}
+ grpc_exec_ctx_invalidate_now(&exec_ctx);
subtract_depth = run_closures(&exec_ctx, exec);
}
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/lib/iomgr/iocp_windows.cc b/src/core/lib/iomgr/iocp_windows.cc
index c082179c0b..336cc86c75 100644
--- a/src/core/lib/iomgr/iocp_windows.cc
+++ b/src/core/lib/iomgr/iocp_windows.cc
@@ -26,6 +26,7 @@
#include <grpc/support/log.h>
#include <grpc/support/log_windows.h>
#include <grpc/support/thd.h>
+#include <grpc/support/useful.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/iocp_windows.h"
@@ -40,25 +41,17 @@ static gpr_atm g_custom_events = 0;
static HANDLE g_iocp;
-static DWORD deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
+static DWORD deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+ grpc_millis deadline) {
gpr_timespec timeout;
- static const int64_t max_spin_polling_us = 10;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
+ if (deadline == GRPC_MILLIS_INF_FUTURE) {
return INFINITE;
}
- if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
- max_spin_polling_us,
- GPR_TIMESPAN))) <= 0) {
- return 0;
- }
- timeout = gpr_time_sub(deadline, now);
- return (DWORD)gpr_time_to_millis(gpr_time_add(
- timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
+ return (DWORD)GPR_MAX(0, deadline - grpc_exec_ctx_now(exec_ctx));
}
grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
BOOL success;
DWORD bytes = 0;
DWORD flags = 0;
@@ -67,9 +60,9 @@ grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx,
grpc_winsocket *socket;
grpc_winsocket_callback_info *info;
GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
- success = GetQueuedCompletionStatus(
- g_iocp, &bytes, &completion_key, &overlapped,
- deadline_to_millis_timeout(deadline, gpr_now(deadline.clock_type)));
+ success =
+ GetQueuedCompletionStatus(g_iocp, &bytes, &completion_key, &overlapped,
+ deadline_to_millis_timeout(exec_ctx, deadline));
if (success == 0 && overlapped == NULL) {
return GRPC_IOCP_WORK_TIMEOUT;
}
@@ -121,7 +114,7 @@ void grpc_iocp_flush(void) {
grpc_iocp_work_status work_status;
do {
- work_status = grpc_iocp_work(&exec_ctx, gpr_inf_past(GPR_CLOCK_MONOTONIC));
+ work_status = grpc_iocp_work(&exec_ctx, GRPC_MILLIS_INF_PAST);
} while (work_status == GRPC_IOCP_WORK_KICK ||
grpc_exec_ctx_flush(&exec_ctx));
}
@@ -129,7 +122,7 @@ void grpc_iocp_flush(void) {
void grpc_iocp_shutdown(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (gpr_atm_acq_load(&g_custom_events)) {
- grpc_iocp_work(&exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+ grpc_iocp_work(&exec_ctx, GRPC_MILLIS_INF_FUTURE);
grpc_exec_ctx_flush(&exec_ctx);
}
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/lib/iomgr/iocp_windows.h b/src/core/lib/iomgr/iocp_windows.h
index 341c159501..aefe7a294a 100644
--- a/src/core/lib/iomgr/iocp_windows.h
+++ b/src/core/lib/iomgr/iocp_windows.h
@@ -34,7 +34,7 @@ typedef enum {
} grpc_iocp_work_status;
grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx,
- gpr_timespec deadline);
+ grpc_millis deadline);
void grpc_iocp_init(void);
void grpc_iocp_kick(void);
void grpc_iocp_flush(void);
diff --git a/src/core/lib/iomgr/iomgr.cc b/src/core/lib/iomgr/iomgr.cc
index 3a0605833a..d6a5b4a76c 100644
--- a/src/core/lib/iomgr/iomgr.cc
+++ b/src/core/lib/iomgr/iomgr.cc
@@ -51,7 +51,7 @@ void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) {
gpr_cv_init(&g_rcv);
grpc_exec_ctx_global_init();
grpc_executor_init(exec_ctx);
- grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_list_init(exec_ctx);
g_root_object.next = g_root_object.prev = &g_root_object;
g_root_object.name = (char *)"root";
grpc_network_status_init();
@@ -98,8 +98,9 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) {
}
last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
}
- if (grpc_timer_check(exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL) ==
- GRPC_TIMERS_FIRED) {
+ exec_ctx->now_is_valid = true;
+ exec_ctx->now = GRPC_MILLIS_INF_FUTURE;
+ if (grpc_timer_check(exec_ctx, NULL) == GRPC_TIMERS_FIRED) {
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_flush(exec_ctx);
grpc_iomgr_platform_flush();
diff --git a/src/core/lib/iomgr/load_file.cc b/src/core/lib/iomgr/load_file.cc
index 0b4d41ea4b..5cb4099ea4 100644
--- a/src/core/lib/iomgr/load_file.cc
+++ b/src/core/lib/iomgr/load_file.cc
@@ -25,7 +25,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#include "src/core/lib/support/block_annotate.h"
+#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/support/string.h"
grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
@@ -73,6 +73,6 @@ end:
GRPC_ERROR_UNREF(error);
error = error_out;
}
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
return error;
}
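
The blocking-region annotation now ends in one of two ways, as seen above and in the resolver changes below. A freestanding usage sketch with toy stand-in macros follows; the assumption (not verified here) is that the exec_ctx-aware variant also invalidates the context's cached clock, since wall time advances while the thread is blocked.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-ins; the real macros live in src/core/lib/iomgr/block_annotate.h
       and may do more (e.g. thread-state bookkeeping for profilers). */
    typedef struct { bool now_is_valid; } toy_exec_ctx;

    #define TOY_START_BLOCKING_REGION fprintf(stderr, "blocking...\n")
    #define TOY_END_BLOCKING_REGION_NO_EXEC_CTX fprintf(stderr, "done\n")
    #define TOY_END_BLOCKING_REGION_WITH_EXEC_CTX(c)                     \
      do {                                                               \
        fprintf(stderr, "done\n");                                       \
        (c)->now_is_valid = false; /* clock moved on while we blocked */ \
      } while (0)

    int main(void) {
      toy_exec_ctx ctx = {true};
      TOY_START_BLOCKING_REGION;
      /* ...a blocking syscall such as poll() or getaddrinfo() goes here... */
      TOY_END_BLOCKING_REGION_WITH_EXEC_CTX(&ctx);
      printf("cached now still valid: %d\n", ctx.now_is_valid); /* 0 */
      return 0;
    }
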
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index 28d63949ea..799fae154c 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -75,8 +75,8 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset);
pollset
lock */
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker, gpr_timespec now,
- gpr_timespec deadline) GRPC_MUST_USE_RESULT;
+ grpc_pollset_worker **worker,
+ grpc_millis deadline) GRPC_MUST_USE_RESULT;
/* Break one polling thread out of polling work for this pollset.
If specific_worker is non-NULL, then kick that worker. */
diff --git a/src/core/lib/iomgr/pollset_uv.cc b/src/core/lib/iomgr/pollset_uv.cc
index 7ea5019ad5..b9901bf8ef 100644
--- a/src/core/lib/iomgr/pollset_uv.cc
+++ b/src/core/lib/iomgr/pollset_uv.cc
@@ -116,13 +116,14 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
+ grpc_millis deadline) {
uint64_t timeout;
GRPC_UV_ASSERT_SAME_THREAD();
gpr_mu_unlock(&grpc_polling_mu);
if (grpc_pollset_work_run_loop) {
- if (gpr_time_cmp(deadline, now) >= 0) {
- timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now));
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ if (deadline >= now) {
+ timeout = deadline - now;
} else {
timeout = 0;
}
diff --git a/src/core/lib/iomgr/pollset_windows.cc b/src/core/lib/iomgr/pollset_windows.cc
index eb295d3eeb..bb4df83fc1 100644
--- a/src/core/lib/iomgr/pollset_windows.cc
+++ b/src/core/lib/iomgr/pollset_windows.cc
@@ -110,7 +110,7 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {}
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_pollset_worker worker;
if (worker_hdl) *worker_hdl = &worker;
@@ -159,7 +159,8 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
&worker);
added_worker = 1;
while (!worker.kicked) {
- if (gpr_cv_wait(&worker.cv, &grpc_polling_mu, deadline)) {
+ if (gpr_cv_wait(&worker.cv, &grpc_polling_mu,
+ grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
break;
}
}
diff --git a/src/core/lib/iomgr/resolve_address_posix.cc b/src/core/lib/iomgr/resolve_address_posix.cc
index 60cfeebd47..1b783495df 100644
--- a/src/core/lib/iomgr/resolve_address_posix.cc
+++ b/src/core/lib/iomgr/resolve_address_posix.cc
@@ -33,10 +33,10 @@
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
-#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/string.h"
static grpc_error *blocking_resolve_address_impl(
@@ -81,7 +81,7 @@ static grpc_error *blocking_resolve_address_impl(
GRPC_SCHEDULING_START_BLOCKING_REGION;
s = getaddrinfo(host, port, &hints, &result);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
if (s != 0) {
/* Retry if well-known service name is recognized */
@@ -90,7 +90,7 @@ static grpc_error *blocking_resolve_address_impl(
if (strcmp(port, svc[i][0]) == 0) {
GRPC_SCHEDULING_START_BLOCKING_REGION;
s = getaddrinfo(host, svc[i][1], &hints, &result);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
break;
}
}
diff --git a/src/core/lib/iomgr/resolve_address_windows.cc b/src/core/lib/iomgr/resolve_address_windows.cc
index abcfc2114d..451f01a701 100644
--- a/src/core/lib/iomgr/resolve_address_windows.cc
+++ b/src/core/lib/iomgr/resolve_address_windows.cc
@@ -34,10 +34,10 @@
#include <grpc/support/string_util.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
+#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
-#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/string.h"
typedef struct {
@@ -87,7 +87,7 @@ static grpc_error *blocking_resolve_address_impl(
GRPC_SCHEDULING_START_BLOCKING_REGION;
s = getaddrinfo(host, port, &hints, &result);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
if (s != 0) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "getaddrinfo");
goto done;
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index 60262435b3..ecb5747da8 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -89,6 +89,8 @@ struct grpc_resource_user {
grpc_closure_list on_allocated;
/* True if we are currently trying to allocate from the quota, false if not */
bool allocating;
+ /* How many bytes of allocations are outstanding */
+ int64_t outstanding_allocations;
/* True if we are currently trying to add ourselves to the non-free quota
list, false otherwise */
bool added_to_free_pool;
@@ -153,6 +155,9 @@ struct grpc_resource_quota {
char *name;
};
+static void ru_unref_by(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, gpr_atm amount);
+
/*******************************************************************************
* list management
*/
@@ -289,6 +294,25 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
while ((resource_user = rulist_pop_head(resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION))) {
gpr_mu_lock(&resource_user->mu);
+ if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
+ gpr_log(GPR_DEBUG, "RQ: check allocation for user %p shutdown=%" PRIdPTR
+ " free_pool=%" PRId64,
+ resource_user, gpr_atm_no_barrier_load(&resource_user->shutdown),
+ resource_user->free_pool);
+ }
+ if (gpr_atm_no_barrier_load(&resource_user->shutdown)) {
+ resource_user->allocating = false;
+ grpc_closure_list_fail_all(
+ &resource_user->on_allocated,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resource user shutdown"));
+ int64_t aborted_allocations = resource_user->outstanding_allocations;
+ resource_user->outstanding_allocations = 0;
+ resource_user->free_pool += aborted_allocations;
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &resource_user->on_allocated);
+ gpr_mu_unlock(&resource_user->mu);
+ ru_unref_by(exec_ctx, resource_user, (gpr_atm)aborted_allocations);
+ continue;
+ }
if (resource_user->free_pool < 0 &&
-resource_user->free_pool <= resource_quota->free_pool) {
int64_t amt = -resource_user->free_pool;
@@ -308,6 +332,7 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
}
if (resource_user->free_pool >= 0) {
resource_user->allocating = false;
+ resource_user->outstanding_allocations = 0;
GRPC_CLOSURE_LIST_SCHED(exec_ctx, &resource_user->on_allocated);
gpr_mu_unlock(&resource_user->mu);
} else {
@@ -488,6 +513,9 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
}
static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
+ if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
+ gpr_log(GPR_DEBUG, "RU shutdown %p", ru);
+ }
grpc_resource_user *resource_user = (grpc_resource_user *)ru;
GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
GRPC_ERROR_CANCELLED);
@@ -497,6 +525,9 @@ static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
resource_user->reclaimers[1] = NULL;
rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE);
+ if (resource_user->allocating) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
}
static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
@@ -718,6 +749,7 @@ grpc_resource_user *grpc_resource_user_create(
resource_user->reclaimers[1] = NULL;
resource_user->new_reclaimers[0] = NULL;
resource_user->new_reclaimers[1] = NULL;
+ resource_user->outstanding_allocations = 0;
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
resource_user->links[i].next = resource_user->links[i].prev = NULL;
}
@@ -778,6 +810,7 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&resource_user->mu);
ru_ref_by(resource_user, (gpr_atm)size);
resource_user->free_pool -= (int64_t)size;
+ resource_user->outstanding_allocations += (int64_t)size;
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
@@ -792,6 +825,7 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_NONE);
}
} else {
+ resource_user->outstanding_allocations -= (int64_t)size;
GRPC_CLOSURE_SCHED(exec_ctx, optional_on_done, GRPC_ERROR_NONE);
}
gpr_mu_unlock(&resource_user->mu);
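
A toy model of the new outstanding_allocations accounting added above (illustrative only; the real code also manages refcounts, closures and locking): bytes requested but not yet granted are tracked per user, so shutting a user down mid-allocation can hand exactly those bytes back to the quota instead of leaking them.

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
      int64_t free_pool;               /* may go negative while a request waits */
      int64_t outstanding_allocations; /* bytes requested but not yet granted */
    } toy_user;

    static void alloc_bytes(toy_user *u, int64_t size) {
      u->free_pool -= size;
      u->outstanding_allocations += size;
    }

    static void shutdown_user(toy_user *u) {
      /* abort whatever is still pending and return it to the pool */
      u->free_pool += u->outstanding_allocations;
      u->outstanding_allocations = 0;
    }

    int main(void) {
      toy_user u = {0, 0};
      alloc_bytes(&u, 4096);
      printf("free=%lld outstanding=%lld\n", (long long)u.free_pool,
             (long long)u.outstanding_allocations); /* free=-4096 outstanding=4096 */
      shutdown_user(&u);
      printf("free=%lld outstanding=%lld\n", (long long)u.free_pool,
             (long long)u.outstanding_allocations); /* free=0 outstanding=0 */
      return 0;
    }
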
diff --git a/src/core/lib/iomgr/tcp_client.h b/src/core/lib/iomgr/tcp_client.h
index 18cf6114f2..1b102b5784 100644
--- a/src/core/lib/iomgr/tcp_client.h
+++ b/src/core/lib/iomgr/tcp_client.h
@@ -39,7 +39,7 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_connect,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline);
+ grpc_millis deadline);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc
index 7d9e9533fd..5611dd9062 100644
--- a/src/core/lib/iomgr/tcp_client_posix.cc
+++ b/src/core/lib/iomgr/tcp_client_posix.cc
@@ -48,7 +48,6 @@ extern grpc_tracer_flag grpc_tcp_trace;
typedef struct {
gpr_mu mu;
grpc_fd *fd;
- gpr_timespec deadline;
grpc_timer alarm;
grpc_closure on_alarm;
int refs;
@@ -244,7 +243,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
int fd;
grpc_dualstack_mode dsmode;
int err;
@@ -325,9 +324,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&ac->mu);
GRPC_CLOSURE_INIT(&ac->on_alarm, tc_on_alarm, ac, grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &ac->alarm,
- gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
- &ac->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm);
grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure);
gpr_mu_unlock(&ac->mu);
@@ -342,7 +339,7 @@ void (*grpc_tcp_client_connect_impl)(
grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) = tcp_client_connect_impl;
+ grpc_millis deadline) = tcp_client_connect_impl;
}
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
@@ -350,7 +347,7 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
channel_args, addr, deadline);
}
diff --git a/src/core/lib/iomgr/tcp_client_uv.cc b/src/core/lib/iomgr/tcp_client_uv.cc
index 83835978f4..f3e9366299 100644
--- a/src/core/lib/iomgr/tcp_client_uv.cc
+++ b/src/core/lib/iomgr/tcp_client_uv.cc
@@ -119,7 +119,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *resolved_addr,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_uv_tcp_connect *connect;
grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
(void)channel_args;
@@ -158,9 +158,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
uv_tc_on_connect);
GRPC_CLOSURE_INIT(&connect->on_alarm, uv_tc_on_alarm, connect,
grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &connect->alarm,
- gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
- &connect->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &connect->alarm, deadline, &connect->on_alarm);
}
// overridden by api_fuzzer.c
@@ -169,7 +167,7 @@ void (*grpc_tcp_client_connect_impl)(
grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) = tcp_client_connect_impl;
+ grpc_millis deadline) = tcp_client_connect_impl;
}
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
@@ -177,7 +175,7 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
channel_args, addr, deadline);
}
diff --git a/src/core/lib/iomgr/tcp_client_windows.cc b/src/core/lib/iomgr/tcp_client_windows.cc
index 1154965c82..9adf7ee4e9 100644
--- a/src/core/lib/iomgr/tcp_client_windows.cc
+++ b/src/core/lib/iomgr/tcp_client_windows.cc
@@ -43,7 +43,6 @@ typedef struct {
grpc_closure *on_done;
gpr_mu mu;
grpc_winsocket *socket;
- gpr_timespec deadline;
grpc_timer alarm;
grpc_closure on_alarm;
char *addr_name;
@@ -126,7 +125,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
static void tcp_client_connect_impl(
grpc_exec_ctx *exec_ctx, grpc_closure *on_done, grpc_endpoint **endpoint,
grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
- const grpc_resolved_address *addr, gpr_timespec deadline) {
+ const grpc_resolved_address *addr, grpc_millis deadline) {
SOCKET sock = INVALID_SOCKET;
BOOL success;
int status;
@@ -206,8 +205,7 @@ static void tcp_client_connect_impl(
GRPC_CLOSURE_INIT(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&ac->on_alarm, on_alarm, ac, grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm,
- gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm);
grpc_socket_notify_on_write(exec_ctx, socket, &ac->on_connect);
return;
@@ -233,7 +231,7 @@ void (*grpc_tcp_client_connect_impl)(
grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) = tcp_client_connect_impl;
+ grpc_millis deadline) = tcp_client_connect_impl;
}
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
@@ -241,7 +239,7 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
channel_args, addr, deadline);
}
diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc
index 7e271294fd..7fcaef7679 100644
--- a/src/core/lib/iomgr/tcp_posix.cc
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -135,13 +135,11 @@ static void run_poller(grpc_exec_ctx *exec_ctx, void *bp,
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
}
gpr_mu_lock(p->pollset_mu);
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec deadline =
- gpr_time_add(now, gpr_time_from_seconds(10, GPR_TIMESPAN));
+ grpc_millis deadline = grpc_exec_ctx_now(exec_ctx) + 10 * GPR_MS_PER_SEC;

GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx);
- GRPC_LOG_IF_ERROR("backup_poller:pollset_work",
- grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), NULL,
- now, deadline));
+ GRPC_LOG_IF_ERROR(
+ "backup_poller:pollset_work",
+ grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), NULL, deadline));
gpr_mu_unlock(p->pollset_mu);
/* last "uncovered" notification is the ref that keeps us polling, if we get
* there try a cas to release it */
diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h
index 466600d582..419e834cf1 100644
--- a/src/core/lib/iomgr/timer.h
+++ b/src/core/lib/iomgr/timer.h
@@ -45,8 +45,7 @@ typedef struct grpc_timer grpc_timer;
application callback is also responsible for maintaining information about
when to free up any user-level state. */
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
- gpr_timespec deadline, grpc_closure *closure,
- gpr_timespec now);
+ grpc_millis deadline, grpc_closure *closure);
/* Initialize *timer without setting it. This can later be passed through
the regular init or cancel */
@@ -96,8 +95,8 @@ typedef enum {
with high probability at least one thread in the system will see an update
at any time slice. */
grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
- gpr_timespec now, gpr_timespec *next);
-void grpc_timer_list_init(gpr_timespec now);
+ grpc_millis *next);
+void grpc_timer_list_init(grpc_exec_ctx *exec_ctx);
void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx);
/* Consume a kick issued by grpc_kick_poller */
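
With the signature above, arming a timer no longer needs a clock conversion or an explicit "now". A minimal usage fragment (my_timer, arm_timeout and on_timeout_cb are hypothetical names; the gRPC calls are the ones declared in this patch, and the usual iomgr headers are assumed):

    /* Hypothetical caller: fire on_timeout_cb roughly 250ms from now. */
    static grpc_timer my_timer;
    static grpc_closure on_timeout;

    static void on_timeout_cb(grpc_exec_ctx *exec_ctx, void *arg,
                              grpc_error *error) {
      /* error is GRPC_ERROR_NONE on expiry, GRPC_ERROR_CANCELLED on cancel */
    }

    static void arm_timeout(grpc_exec_ctx *exec_ctx, void *arg) {
      GRPC_CLOSURE_INIT(&on_timeout, on_timeout_cb, arg,
                        grpc_schedule_on_exec_ctx);
      grpc_timer_init(exec_ctx, &my_timer,
                      grpc_exec_ctx_now(exec_ctx) + 250 /* grpc_millis */,
                      &on_timeout);
    }
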
diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc
index 971d80d8bc..b8e895de6f 100644
--- a/src/core/lib/iomgr/timer_generic.cc
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -220,9 +220,6 @@ struct shared_mutables {
static struct shared_mutables g_shared_mutables;
-static gpr_clock_type g_clock_type;
-static gpr_timespec g_start_time;
-
static gpr_atm saturating_add(gpr_atm a, gpr_atm b) {
if (a > GPR_ATM_MAX - b) {
return GPR_ATM_MAX;
@@ -235,52 +232,19 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
gpr_atm *next,
grpc_error *error);
-static gpr_timespec dbl_to_ts(double d) {
- gpr_timespec ts;
- ts.tv_sec = (int64_t)d;
- ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec));
- ts.clock_type = GPR_TIMESPAN;
- return ts;
-}
-
-static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) {
- ts = gpr_time_sub(ts, g_start_time);
- double x = GPR_MS_PER_SEC * (double)ts.tv_sec +
- (double)ts.tv_nsec / GPR_NS_PER_MS +
- (double)(GPR_NS_PER_SEC - 1) / (double)GPR_NS_PER_SEC;
- if (x < 0) return 0;
- if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
- return (gpr_atm)x;
-}
-
-static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) {
- ts = gpr_time_sub(ts, g_start_time);
- double x =
- GPR_MS_PER_SEC * (double)ts.tv_sec + (double)ts.tv_nsec / GPR_NS_PER_MS;
- if (x < 0) return 0;
- if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
- return (gpr_atm)x;
-}
-
-static gpr_timespec atm_to_timespec(gpr_atm x) {
- return gpr_time_add(g_start_time, dbl_to_ts((double)x / 1000.0));
-}
-
static gpr_atm compute_min_deadline(timer_shard *shard) {
return grpc_timer_heap_is_empty(&shard->heap)
? saturating_add(shard->queue_deadline_cap, 1)
: grpc_timer_heap_top(&shard->heap)->deadline;
}
-void grpc_timer_list_init(gpr_timespec now) {
+void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) {
uint32_t i;
g_shared_mutables.initialized = true;
g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
gpr_mu_init(&g_shared_mutables.mu);
- g_clock_type = now.clock_type;
- g_start_time = now;
- g_shared_mutables.min_timer = timespec_to_atm_round_down(now);
+ g_shared_mutables.min_timer = grpc_exec_ctx_now(exec_ctx);
gpr_tls_init(&g_last_seen_min_timer);
gpr_tls_set(&g_last_seen_min_timer, 0);
grpc_register_tracer(&grpc_timer_trace);
@@ -317,10 +281,6 @@ void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
g_shared_mutables.initialized = false;
}
-static double ts_to_dbl(gpr_timespec ts) {
- return (double)ts.tv_sec + 1e-9 * ts.tv_nsec;
-}
-
/* returns true if the first element in the list */
static void list_join(grpc_timer *head, grpc_timer *timer) {
timer->next = head;
@@ -361,24 +321,20 @@ static void note_deadline_change(timer_shard *shard) {
void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = false; }
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
- gpr_timespec deadline, grpc_closure *closure,
- gpr_timespec now) {
+ grpc_millis deadline, grpc_closure *closure) {
int is_first_timer = 0;
timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
- GPR_ASSERT(deadline.clock_type == g_clock_type);
- GPR_ASSERT(now.clock_type == g_clock_type);
timer->closure = closure;
- gpr_atm deadline_atm = timer->deadline = timespec_to_atm_round_up(deadline);
+ timer->deadline = deadline;
#ifndef NDEBUG
timer->hash_table_next = NULL;
#endif
if (GRPC_TRACER_ON(grpc_timer_trace)) {
- gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR
- "] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]",
- timer, deadline.tv_sec, deadline.tv_nsec, deadline_atm, now.tv_sec,
- now.tv_nsec, timespec_to_atm_round_down(now), closure, closure->cb);
+ gpr_log(GPR_DEBUG,
+ "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer,
+ deadline, grpc_exec_ctx_now(exec_ctx), closure, closure->cb);
}
if (!g_shared_mutables.initialized) {
@@ -391,7 +347,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_mu_lock(&shard->mu);
timer->pending = true;
- if (gpr_time_cmp(deadline, now) <= 0) {
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ if (deadline <= now) {
timer->pending = false;
GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
gpr_mu_unlock(&shard->mu);
@@ -400,11 +357,11 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
}
grpc_time_averaged_stats_add_sample(&shard->stats,
- ts_to_dbl(gpr_time_sub(deadline, now)));
+ (double)(deadline - now) / 1000.0);
ADD_TO_HASH_TABLE(timer);
- if (deadline_atm < shard->queue_deadline_cap) {
+ if (deadline < shard->queue_deadline_cap) {
is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
} else {
timer->heap_index = INVALID_HEAP_INDEX;
@@ -435,12 +392,12 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRIdPTR,
shard->min_deadline);
}
- if (deadline_atm < shard->min_deadline) {
+ if (deadline < shard->min_deadline) {
gpr_atm old_min_deadline = g_shard_queue[0]->min_deadline;
- shard->min_deadline = deadline_atm;
+ shard->min_deadline = deadline;
note_deadline_change(shard);
- if (shard->shard_queue_index == 0 && deadline_atm < old_min_deadline) {
- gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, deadline_atm);
+ if (shard->shard_queue_index == 0 && deadline < old_min_deadline) {
+ gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, deadline);
grpc_kick_poller();
}
}
@@ -544,8 +501,9 @@ static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) {
}
if (timer->deadline > now) return NULL;
if (GRPC_TRACER_ON(grpc_timer_trace)) {
- gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late", timer,
- now - timer->deadline);
+ gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late via %s scheduler",
+ timer, now - timer->deadline,
+ timer->closure->scheduler->vtable->name);
}
timer->pending = false;
grpc_timer_heap_pop(&shard->heap);
@@ -567,6 +525,10 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard,
}
*new_min_deadline = compute_min_deadline(shard);
gpr_mu_unlock(&shard->mu);
+ if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ gpr_log(GPR_DEBUG, " .. shard[%d] popped %" PRIdPTR,
+ (int)(shard - g_shards), n);
+ }
return n;
}
@@ -639,29 +601,27 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
}
grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
- gpr_timespec now, gpr_timespec *next) {
+ grpc_millis *next) {
// prelude
- GPR_ASSERT(now.clock_type == g_clock_type);
- gpr_atm now_atm = timespec_to_atm_round_down(now);
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
/* fetch from a thread-local first: this avoids contention on a globally
mutable cacheline in the common case */
- gpr_atm min_timer = gpr_tls_get(&g_last_seen_min_timer);
- if (now_atm < min_timer) {
+ grpc_millis min_timer = gpr_tls_get(&g_last_seen_min_timer);
+ if (now < min_timer) {
if (next != NULL) {
- *next =
- atm_to_timespec(GPR_MIN(timespec_to_atm_round_up(*next), min_timer));
+ *next = GPR_MIN(*next, min_timer);
}
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
gpr_log(GPR_DEBUG,
- "TIMER CHECK SKIP: now_atm=%" PRIdPTR " min_timer=%" PRIdPTR,
- now_atm, min_timer);
+ "TIMER CHECK SKIP: now=%" PRIdPTR " min_timer=%" PRIdPTR, now,
+ min_timer);
}
return GRPC_TIMERS_CHECKED_AND_EMPTY;
}
grpc_error *shutdown_error =
- gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0
+ now != GRPC_MILLIS_INF_FUTURE
? GRPC_ERROR_NONE
: GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system");
@@ -671,34 +631,24 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
if (next == NULL) {
next_str = gpr_strdup("NULL");
} else {
- gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec,
- next->tv_nsec, timespec_to_atm_round_down(*next));
+ gpr_asprintf(&next_str, "%" PRIdPTR, *next);
}
- gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRId64 ".%09d [%" PRIdPTR
- "] next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR,
- now.tv_sec, now.tv_nsec, now_atm, next_str,
- gpr_tls_get(&g_last_seen_min_timer),
+ gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRIdPTR
+ " next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR,
+ now, next_str, gpr_tls_get(&g_last_seen_min_timer),
gpr_atm_no_barrier_load(&g_shared_mutables.min_timer));
gpr_free(next_str);
}
// actual code
- grpc_timer_check_result r;
- gpr_atm next_atm;
- if (next == NULL) {
- r = run_some_expired_timers(exec_ctx, now_atm, NULL, shutdown_error);
- } else {
- next_atm = timespec_to_atm_round_down(*next);
- r = run_some_expired_timers(exec_ctx, now_atm, &next_atm, shutdown_error);
- *next = atm_to_timespec(next_atm);
- }
+ grpc_timer_check_result r =
+ run_some_expired_timers(exec_ctx, now, next, shutdown_error);
// tracing
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
char *next_str;
if (next == NULL) {
next_str = gpr_strdup("NULL");
} else {
- gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec,
- next->tv_nsec, next_atm);
+ gpr_asprintf(&next_str, "%" PRIdPTR, *next);
}
gpr_log(GPR_DEBUG, "TIMER CHECK END: r=%d; next=%s", r, next_str);
gpr_free(next_str);
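
The fast path that survives the rewrite above is easier to see with plain integers (single-threaded toy; in the real code last_seen_min_timer is a thread-local and the global minimum is an atomic): as long as "now" is still before the earliest deadline this thread last observed, nothing can have expired, so the shared shard state is never touched.

    #include <stdint.h>
    #include <stdio.h>

    static int64_t last_seen_min_timer = 0; /* thread-local in the real code */

    static int timer_check(int64_t now, int64_t global_min_timer) {
      if (now < last_seen_min_timer) {
        return 0; /* fast path: nothing can have expired, skip shared state */
      }
      /* slow path: consult the shared minimum and remember it */
      last_seen_min_timer = global_min_timer;
      return now >= global_min_timer; /* 1 if some timer may be due */
    }

    int main(void) {
      printf("%d\n", timer_check(100, 500)); /* slow path, nothing due -> 0 */
      printf("%d\n", timer_check(200, 500)); /* fast path, skipped     -> 0 */
      printf("%d\n", timer_check(600, 500)); /* something may be due   -> 1 */
      return 0;
    }
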
diff --git a/src/core/lib/iomgr/timer_manager.cc b/src/core/lib/iomgr/timer_manager.cc
index 9b54fab898..1248f82189 100644
--- a/src/core/lib/iomgr/timer_manager.cc
+++ b/src/core/lib/iomgr/timer_manager.cc
@@ -55,7 +55,7 @@ static bool g_kicked;
static bool g_has_timed_waiter;
// the deadline of the current timed waiter thread (only relevant if
// g_has_timed_waiter is true)
-static gpr_timespec g_timed_waiter_deadline;
+static grpc_millis g_timed_waiter_deadline;
// generation counter to track which thread is waiting for the next timer
static uint64_t g_timed_waiter_generation;
@@ -99,9 +99,8 @@ static void start_timer_thread_and_unlock(void) {
void grpc_timer_manager_tick() {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- gpr_timespec next = gpr_inf_future(GPR_CLOCK_MONOTONIC);
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- grpc_timer_check(&exec_ctx, now, &next);
+ grpc_millis next = GRPC_MILLIS_INF_FUTURE;
+ grpc_timer_check(&exec_ctx, &next);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -124,6 +123,9 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) {
gpr_mu_unlock(&g_mu);
}
// without our lock, flush the exec_ctx
+ if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ gpr_log(GPR_DEBUG, "flush exec_ctx");
+ }
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&g_mu);
// garbage collect any threads hanging out that are dead
@@ -136,8 +138,7 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) {
// wait until 'next' (or forever if there is already a timed waiter in the pool)
// returns true if the thread should continue executing (false if it should
// shutdown)
-static bool wait_until(gpr_timespec next) {
- const gpr_timespec inf_future = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+static bool wait_until(grpc_exec_ctx *exec_ctx, grpc_millis next) {
gpr_mu_lock(&g_mu);
// if we're not threaded anymore, leave
if (!g_threaded) {
@@ -171,30 +172,29 @@ static bool wait_until(gpr_timespec next) {
unless their 'next' is earlier than the current timed-waiter's deadline
(in which case the thread with earlier 'next' takes over as the new timed
waiter) */
- if (gpr_time_cmp(next, inf_future) != 0) {
- if (!g_has_timed_waiter ||
- (gpr_time_cmp(next, g_timed_waiter_deadline) < 0)) {
+ if (next != GRPC_MILLIS_INF_FUTURE) {
+ if (!g_has_timed_waiter || (next < g_timed_waiter_deadline)) {
my_timed_waiter_generation = ++g_timed_waiter_generation;
g_has_timed_waiter = true;
g_timed_waiter_deadline = next;
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
- gpr_timespec wait_time =
- gpr_time_sub(next, gpr_now(GPR_CLOCK_MONOTONIC));
- gpr_log(GPR_DEBUG, "sleep for a %" PRId64 ".%09d seconds",
- wait_time.tv_sec, wait_time.tv_nsec);
+ grpc_millis wait_time = next - grpc_exec_ctx_now(exec_ctx);
+ gpr_log(GPR_DEBUG, "sleep for a %" PRIdPTR " milliseconds",
+ wait_time);
}
} else { // g_timed_waiter == true && next >= g_timed_waiter_deadline
- next = inf_future;
+ next = GRPC_MILLIS_INF_FUTURE;
}
}
if (GRPC_TRACER_ON(grpc_timer_check_trace) &&
- gpr_time_cmp(next, inf_future) == 0) {
+ next == GRPC_MILLIS_INF_FUTURE) {
gpr_log(GPR_DEBUG, "sleep until kicked");
}
- gpr_cv_wait(&g_cv_wait, &g_mu, next);
+ gpr_cv_wait(&g_cv_wait, &g_mu,
+ grpc_millis_to_timespec(next, GPR_CLOCK_REALTIME));
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d",
@@ -206,7 +206,7 @@ static bool wait_until(gpr_timespec next) {
// there's work to do after checking timers (code above)
if (my_timed_waiter_generation == g_timed_waiter_generation) {
g_has_timed_waiter = false;
- g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE;
}
}
@@ -222,12 +222,11 @@ static bool wait_until(gpr_timespec next) {
}
static void timer_main_loop(grpc_exec_ctx *exec_ctx) {
- const gpr_timespec inf_future = gpr_inf_future(GPR_CLOCK_MONOTONIC);
for (;;) {
- gpr_timespec next = inf_future;
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ grpc_millis next = GRPC_MILLIS_INF_FUTURE;
+ grpc_exec_ctx_invalidate_now(exec_ctx);
// check timer state, updates next to the next time to run a check
- switch (grpc_timer_check(exec_ctx, now, &next)) {
+ switch (grpc_timer_check(exec_ctx, &next)) {
case GRPC_TIMERS_FIRED:
run_some_timers(exec_ctx);
break;
@@ -244,10 +243,10 @@ static void timer_main_loop(grpc_exec_ctx *exec_ctx) {
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
gpr_log(GPR_DEBUG, "timers not checked: expect another thread to");
}
- next = inf_future;
+ next = GRPC_MILLIS_INF_FUTURE;
/* fall through */
case GRPC_TIMERS_CHECKED_AND_EMPTY:
- if (!wait_until(next)) {
+ if (!wait_until(exec_ctx, next)) {
return;
}
break;
@@ -303,7 +302,7 @@ void grpc_timer_manager_init(void) {
g_completed_threads = NULL;
g_has_timed_waiter = false;
- g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE;
start_threads();
}
@@ -350,7 +349,7 @@ void grpc_kick_poller(void) {
gpr_mu_lock(&g_mu);
g_kicked = true;
g_has_timed_waiter = false;
- g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE;
++g_timed_waiter_generation;
gpr_cv_signal(&g_cv_wait);
gpr_mu_unlock(&g_mu);
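
The timed-waiter election this file keeps (now over grpc_millis) can be sketched as follows, single-threaded and illustrative only; the real code does this under g_mu and re-checks after waking. A thread takes the timed-waiter role only if nobody holds it or its own deadline is earlier; every other thread sleeps unbounded until kicked.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MILLIS_INF_FUTURE INT64_MAX

    static bool has_timed_waiter = false;
    static int64_t timed_waiter_deadline = MILLIS_INF_FUTURE;

    /* Returns how long this thread should sleep before re-checking timers. */
    static int64_t choose_wait_deadline(int64_t next) {
      if (next != MILLIS_INF_FUTURE &&
          (!has_timed_waiter || next < timed_waiter_deadline)) {
        has_timed_waiter = true;
        timed_waiter_deadline = next; /* this thread becomes the timed waiter */
        return next;
      }
      return MILLIS_INF_FUTURE; /* another thread wakes first; wait for a kick */
    }

    int main(void) {
      printf("%lld\n", (long long)choose_wait_deadline(5000)); /* 5000 */
      printf("%lld\n", (long long)choose_wait_deadline(9000)); /* inf: not the waiter */
      printf("%lld\n", (long long)choose_wait_deadline(3000)); /* 3000: earlier, takes over */
      return 0;
    }
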
diff --git a/src/core/lib/iomgr/timer_uv.cc b/src/core/lib/iomgr/timer_uv.cc
index 53f79b545a..bd1e922c7f 100644
--- a/src/core/lib/iomgr/timer_uv.cc
+++ b/src/core/lib/iomgr/timer_uv.cc
@@ -55,19 +55,18 @@ void run_expired_timer(uv_timer_t *handle) {
}
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
- gpr_timespec deadline, grpc_closure *closure,
- gpr_timespec now) {
+ grpc_millis deadline, grpc_closure *closure) {
uint64_t timeout;
uv_timer_t *uv_timer;
GRPC_UV_ASSERT_SAME_THREAD();
timer->closure = closure;
- if (gpr_time_cmp(deadline, now) <= 0) {
+ if (deadline <= grpc_exec_ctx_now(exec_ctx)) {
timer->pending = 0;
GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
return;
}
timer->pending = 1;
- timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now));
+ timeout = (uint64_t)(deadline - grpc_exec_ctx_now(exec_ctx));
uv_timer = (uv_timer_t *)gpr_malloc(sizeof(uv_timer_t));
uv_timer_init(uv_default_loop(), uv_timer);
uv_timer->data = timer;
@@ -91,7 +90,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
}
grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
- gpr_timespec now, gpr_timespec *next) {
+ grpc_millis *next) {
return GRPC_TIMERS_NOT_CHECKED;
}
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.cc b/src/core/lib/security/credentials/google_default/google_default_credentials.cc
index 8fe5802d49..5b2ddceb4a 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.cc
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.cc
@@ -97,7 +97,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
/* The http call is local. If it takes more than one sec, it is for sure not
on compute engine. */
- gpr_timespec max_detection_delay = gpr_time_from_seconds(1, GPR_TIMESPAN);
+ grpc_millis max_detection_delay = GPR_MS_PER_SEC;
grpc_pollset *pollset = (grpc_pollset *)gpr_zalloc(grpc_pollset_size());
grpc_pollset_init(pollset, &g_polling_mu);
@@ -116,7 +116,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
grpc_resource_quota_create("google_default_credentials");
grpc_httpcli_get(
exec_ctx, &context, &detector.pollent, resource_quota, &request,
- gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
+ grpc_exec_ctx_now(exec_ctx) + max_detection_delay,
GRPC_CLOSURE_CREATE(on_compute_engine_detection_http_response, &detector,
grpc_schedule_on_exec_ctx),
&detector.response);
@@ -133,8 +133,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
"pollset_work",
grpc_pollset_work(exec_ctx,
grpc_polling_entity_pollset(&detector.pollent),
- &worker, gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_inf_future(GPR_CLOCK_MONOTONIC)))) {
+ &worker, GRPC_MILLIS_INF_FUTURE))) {
detector.is_done = 1;
detector.success = 0;
}
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.cc b/src/core/lib/security/credentials/jwt/jwt_verifier.cc
index aea16dee92..39e72c195b 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.cc
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.cc
@@ -384,7 +384,7 @@ void verifier_cb_ctx_destroy(grpc_exec_ctx *exec_ctx, verifier_cb_ctx *ctx) {
gpr_timespec grpc_jwt_verifier_clock_skew = {60, 0, GPR_TIMESPAN};
/* Max delay defaults to one minute. */
-gpr_timespec grpc_jwt_verifier_max_delay = {60, 0, GPR_TIMESPAN};
+grpc_millis grpc_jwt_verifier_max_delay = 60 * GPR_MS_PER_SEC;
typedef struct {
char *email_domain;
@@ -711,7 +711,7 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
resource_quota = grpc_resource_quota_create("jwt_verifier");
grpc_httpcli_get(
exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
- gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
+ grpc_exec_ctx_now(exec_ctx) + grpc_jwt_verifier_max_delay,
GRPC_CLOSURE_CREATE(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx),
&ctx->responses[HTTP_RESPONSE_KEYS]);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
@@ -838,10 +838,10 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
channel. This would allow us to cancel an authentication query when under
extreme memory pressure. */
resource_quota = grpc_resource_quota_create("jwt_verifier");
- grpc_httpcli_get(
- exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
- gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
- http_cb, &ctx->responses[rsp_idx]);
+ grpc_httpcli_get(exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent,
+ resource_quota, &req,
+ grpc_exec_ctx_now(exec_ctx) + grpc_jwt_verifier_max_delay,
+ http_cb, &ctx->responses[rsp_idx]);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
gpr_free(req.host);
gpr_free(req.http.path);
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.h b/src/core/lib/security/credentials/jwt/jwt_verifier.h
index 0603811627..998365e75c 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.h
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.h
@@ -85,7 +85,7 @@ typedef struct {
/* Globals to control the verifier. Not thread-safe. */
extern gpr_timespec grpc_jwt_verifier_clock_skew;
-extern gpr_timespec grpc_jwt_verifier_max_delay;
+extern grpc_millis grpc_jwt_verifier_max_delay;
/* The verifier can be created with some custom mappings to help with key
discovery in the case where the issuer is an email address.
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc b/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
index 0a801bec82..f52a424e36 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
@@ -117,7 +117,7 @@ static void oauth2_token_fetcher_destruct(grpc_exec_ctx *exec_ctx,
grpc_credentials_status
grpc_oauth2_token_fetcher_credentials_parse_server_response(
grpc_exec_ctx *exec_ctx, const grpc_http_response *response,
- grpc_mdelem *token_md, gpr_timespec *token_lifetime) {
+ grpc_mdelem *token_md, grpc_millis *token_lifetime) {
char *null_terminated_body = NULL;
char *new_access_token = NULL;
grpc_credentials_status status = GRPC_CREDENTIALS_OK;
@@ -183,9 +183,7 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
}
gpr_asprintf(&new_access_token, "%s %s", token_type->value,
access_token->value);
- token_lifetime->tv_sec = strtol(expires_in->value, NULL, 10);
- token_lifetime->tv_nsec = 0;
- token_lifetime->clock_type = GPR_TIMESPAN;
+ *token_lifetime = strtol(expires_in->value, NULL, 10) * GPR_MS_PER_SEC;
if (!GRPC_MDISNULL(*token_md)) GRPC_MDELEM_UNREF(exec_ctx, *token_md);
*token_md = grpc_mdelem_from_slices(
exec_ctx,
@@ -214,7 +212,7 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx,
grpc_oauth2_token_fetcher_credentials *c =
(grpc_oauth2_token_fetcher_credentials *)r->creds;
grpc_mdelem access_token_md = GRPC_MDNULL;
- gpr_timespec token_lifetime;
+ grpc_millis token_lifetime;
grpc_credentials_status status =
grpc_oauth2_token_fetcher_credentials_parse_server_response(
exec_ctx, &r->response, &access_token_md, &token_lifetime);
@@ -222,10 +220,9 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&c->mu);
c->token_fetch_pending = false;
c->access_token_md = GRPC_MDELEM_REF(access_token_md);
- c->token_expiration =
- status == GRPC_CREDENTIALS_OK
- ? gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), token_lifetime)
- : gpr_inf_past(GPR_CLOCK_REALTIME);
+ c->token_expiration = status == GRPC_CREDENTIALS_OK
+ ? grpc_exec_ctx_now(exec_ctx) + token_lifetime
+ : 0;
grpc_oauth2_pending_get_request_metadata *pending_request =
c->pending_requests;
c->pending_requests = NULL;
@@ -260,14 +257,12 @@ static bool oauth2_token_fetcher_get_request_metadata(
grpc_oauth2_token_fetcher_credentials *c =
(grpc_oauth2_token_fetcher_credentials *)creds;
// Check if we can use the cached token.
- gpr_timespec refresh_threshold = gpr_time_from_seconds(
- GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN);
+ grpc_millis refresh_threshold =
+ GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS * GPR_MS_PER_SEC;
grpc_mdelem cached_access_token_md = GRPC_MDNULL;
gpr_mu_lock(&c->mu);
if (!GRPC_MDISNULL(c->access_token_md) &&
- (gpr_time_cmp(
- gpr_time_sub(c->token_expiration, gpr_now(GPR_CLOCK_REALTIME)),
- refresh_threshold) > 0)) {
+ (c->token_expiration - grpc_exec_ctx_now(exec_ctx) > refresh_threshold)) {
cached_access_token_md = GRPC_MDELEM_REF(c->access_token_md);
}
if (!GRPC_MDISNULL(cached_access_token_md)) {
@@ -296,10 +291,10 @@ static bool oauth2_token_fetcher_get_request_metadata(
gpr_mu_unlock(&c->mu);
if (start_fetch) {
grpc_call_credentials_ref(creds);
- c->fetch_func(
- exec_ctx, grpc_credentials_metadata_request_create(creds),
- &c->httpcli_context, &c->pollent, on_oauth2_token_fetcher_http_response,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), refresh_threshold));
+ c->fetch_func(exec_ctx, grpc_credentials_metadata_request_create(creds),
+ &c->httpcli_context, &c->pollent,
+ on_oauth2_token_fetcher_http_response,
+ grpc_exec_ctx_now(exec_ctx) + refresh_threshold);
}
return false;
}
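
Under the millis clock the cached-token test above is a single subtraction (the removed timespec code compared expiration minus now against the refresh threshold). A freestanding example with illustrative numbers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The cached token is usable only if it outlives "now" by more than the
       refresh threshold.  All values are milliseconds. */
    static bool can_use_cached_token(int64_t token_expiration_ms, int64_t now_ms,
                                     int64_t refresh_threshold_ms) {
      return token_expiration_ms - now_ms > refresh_threshold_ms;
    }

    int main(void) {
      const int64_t threshold = 60 * 1000; /* mirrors the 60s refresh threshold */
      printf("%d\n", can_use_cached_token(10 * 60 * 1000, 0, threshold)); /* 1 */
      printf("%d\n", can_use_cached_token(30 * 1000, 0, threshold));      /* 0 */
      return 0;
    }
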
@@ -340,7 +335,7 @@ static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials *c,
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
gpr_ref_init(&c->base.refcount, 1);
gpr_mu_init(&c->mu);
- c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
+ c->token_expiration = 0;
c->fetch_func = fetch_func;
c->pollent =
grpc_polling_entity_create_from_pollset_set(grpc_pollset_set_create());
@@ -358,7 +353,7 @@ static grpc_call_credentials_vtable compute_engine_vtable = {
static void compute_engine_fetch_oauth2(
grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req,
grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent,
- grpc_iomgr_cb_func response_cb, gpr_timespec deadline) {
+ grpc_iomgr_cb_func response_cb, grpc_millis deadline) {
grpc_http_header header = {(char *)"Metadata-Flavor", (char *)"Google"};
grpc_httpcli_request request;
memset(&request, 0, sizeof(grpc_httpcli_request));
@@ -410,7 +405,7 @@ static grpc_call_credentials_vtable refresh_token_vtable = {
static void refresh_token_fetch_oauth2(
grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req,
grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent,
- grpc_iomgr_cb_func response_cb, gpr_timespec deadline) {
+ grpc_iomgr_cb_func response_cb, grpc_millis deadline) {
grpc_google_refresh_token_credentials *c =
(grpc_google_refresh_token_credentials *)metadata_req->creds;
grpc_http_header header = {(char *)"Content-Type",
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
index c8a9333417..4beaec93e3 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
@@ -61,7 +61,7 @@ typedef void (*grpc_fetch_oauth2_func)(grpc_exec_ctx *exec_ctx,
grpc_httpcli_context *http_context,
grpc_polling_entity *pollent,
grpc_iomgr_cb_func cb,
- gpr_timespec deadline);
+ grpc_millis deadline);
typedef struct grpc_oauth2_pending_get_request_metadata {
grpc_credentials_mdelem_array *md_array;
@@ -74,7 +74,7 @@ typedef struct {
grpc_call_credentials base;
gpr_mu mu;
grpc_mdelem access_token_md;
- gpr_timespec token_expiration;
+ grpc_millis token_expiration;
bool token_fetch_pending;
grpc_oauth2_pending_get_request_metadata *pending_requests;
grpc_httpcli_context httpcli_context;
@@ -104,7 +104,7 @@ grpc_refresh_token_credentials_create_from_auth_refresh_token(
grpc_credentials_status
grpc_oauth2_token_fetcher_credentials_parse_server_response(
grpc_exec_ctx *exec_ctx, const struct grpc_http_response *response,
- grpc_mdelem *token_md, gpr_timespec *token_lifetime);
+ grpc_mdelem *token_md, grpc_millis *token_lifetime);
#ifdef __cplusplus
}
diff --git a/src/core/lib/support/time_posix.cc b/src/core/lib/support/time_posix.cc
index deccb50975..3267ea6b54 100644
--- a/src/core/lib/support/time_posix.cc
+++ b/src/core/lib/support/time_posix.cc
@@ -30,7 +30,6 @@
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
-#include "src/core/lib/support/block_annotate.h"
static struct timespec timespec_from_gpr(gpr_timespec gts) {
struct timespec rv;
@@ -159,9 +158,7 @@ void gpr_sleep_until(gpr_timespec until) {
delta = gpr_time_sub(until, now);
delta_ts = timespec_from_gpr(delta);
- GRPC_SCHEDULING_START_BLOCKING_REGION;
ns_result = nanosleep(&delta_ts, NULL);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
if (ns_result == 0) {
break;
}
diff --git a/src/core/lib/support/time_windows.cc b/src/core/lib/support/time_windows.cc
index dda7566cd8..08c1b22964 100644
--- a/src/core/lib/support/time_windows.cc
+++ b/src/core/lib/support/time_windows.cc
@@ -28,7 +28,6 @@
#include <process.h>
#include <sys/timeb.h>
-#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/time_precise.h"
static LARGE_INTEGER g_start_time;
@@ -94,9 +93,7 @@ void gpr_sleep_until(gpr_timespec until) {
sleep_millis =
delta.tv_sec * GPR_MS_PER_SEC + delta.tv_nsec / GPR_NS_PER_MS;
GPR_ASSERT((sleep_millis >= 0) && (sleep_millis <= INT_MAX));
- GRPC_SCHEDULING_START_BLOCKING_REGION;
Sleep((DWORD)sleep_millis);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
}
}
diff --git a/src/core/lib/surface/alarm.cc b/src/core/lib/surface/alarm.cc
index 4e67543191..16a16bfd93 100644
--- a/src/core/lib/surface/alarm.cc
+++ b/src/core/lib/surface/alarm.cc
@@ -126,8 +126,7 @@ void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq,
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
grpc_timer_init(&exec_ctx, &alarm->alarm,
- gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
- &alarm->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timespec_to_millis_round_up(deadline), &alarm->on_alarm);
grpc_exec_ctx_finish(&exec_ctx);
}
diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc
index 6c97f5cc01..8216aa0ec8 100644
--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -216,7 +216,7 @@ struct grpc_call {
server, it's trailing metadata */
grpc_linked_mdelem send_extra_metadata[MAX_SEND_EXTRA_METADATA_COUNT];
int send_extra_metadata_count;
- gpr_timespec send_deadline;
+ grpc_millis send_deadline;
grpc_slice_buffer_stream sending_stream;
@@ -283,7 +283,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
grpc_error *error);
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error);
-static void get_final_status(grpc_call *call,
+static void get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call,
void (*set_value)(grpc_status_code code,
void *user_data),
void *set_value_user_data, grpc_slice *details);
@@ -372,11 +372,10 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
}
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
- call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ call->metadata_batch[i][j].deadline = GRPC_MILLIS_INF_FUTURE;
}
}
- gpr_timespec send_deadline =
- gpr_convert_clock_type(args->send_deadline, GPR_CLOCK_MONOTONIC);
+ grpc_millis send_deadline = args->send_deadline;
bool immediately_cancel = false;
@@ -394,10 +393,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&pc->child_list_mu);
if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
- send_deadline = gpr_time_min(
- gpr_convert_clock_type(send_deadline,
- args->parent->send_deadline.clock_type),
- args->parent->send_deadline);
+ send_deadline = GPR_MIN(send_deadline, args->parent->send_deadline);
}
/* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
* GRPC_PROPAGATE_STATS_CONTEXT */
@@ -551,8 +547,8 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
GRPC_CQ_INTERNAL_UNREF(exec_ctx, c->cq, "bind");
}
- get_final_status(c, set_status_value_directly, &c->final_info.final_status,
- NULL);
+ get_final_status(exec_ctx, c, set_status_value_directly,
+ &c->final_info.final_status, NULL);
c->final_info.stats.latency =
gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
@@ -738,13 +734,16 @@ static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
* FINAL STATUS CODE MANIPULATION
*/
-static bool get_final_status_from(
- grpc_call *call, grpc_error *error, bool allow_ok_status,
- void (*set_value)(grpc_status_code code, void *user_data),
- void *set_value_user_data, grpc_slice *details) {
+static bool get_final_status_from(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_error *error, bool allow_ok_status,
+ void (*set_value)(grpc_status_code code,
+ void *user_data),
+ void *set_value_user_data,
+ grpc_slice *details) {
grpc_status_code code;
grpc_slice slice = grpc_empty_slice();
- grpc_error_get_status(error, call->send_deadline, &code, &slice, NULL);
+ grpc_error_get_status(exec_ctx, error, call->send_deadline, &code, &slice,
+ NULL);
if (code == GRPC_STATUS_OK && !allow_ok_status) {
return false;
}
@@ -756,7 +755,7 @@ static bool get_final_status_from(
return true;
}
-static void get_final_status(grpc_call *call,
+static void get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call,
void (*set_value)(grpc_status_code code,
void *user_data),
void *set_value_user_data, grpc_slice *details) {
@@ -781,8 +780,9 @@ static void get_final_status(grpc_call *call,
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (status[i].is_set &&
grpc_error_has_clear_grpc_status(status[i].error)) {
- if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
- set_value, set_value_user_data, details)) {
+ if (get_final_status_from(exec_ctx, call, status[i].error,
+ allow_ok_status != 0, set_value,
+ set_value_user_data, details)) {
return;
}
}
@@ -790,8 +790,9 @@ static void get_final_status(grpc_call *call,
/* If no clearly defined status exists, search for 'anything' */
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (status[i].is_set) {
- if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
- set_value, set_value_user_data, details)) {
+ if (get_final_status_from(exec_ctx, call, status[i].error,
+ allow_ok_status != 0, set_value,
+ set_value_user_data, details)) {
return;
}
}
@@ -1330,17 +1331,22 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
}
if (call->is_client) {
- get_final_status(call, set_status_value_directly,
+ get_final_status(exec_ctx, call, set_status_value_directly,
call->final_op.client.status,
call->final_op.client.status_details);
} else {
- get_final_status(call, set_cancelled_value,
+ get_final_status(exec_ctx, call, set_cancelled_value,
call->final_op.server.cancelled, NULL);
}
GRPC_ERROR_UNREF(error);
error = GRPC_ERROR_NONE;
}
+ if (error != GRPC_ERROR_NONE && bctl->op.recv_message &&
+ *call->receiving_buffer != NULL) {
+ grpc_byte_buffer_destroy(*call->receiving_buffer);
+ *call->receiving_buffer = NULL;
+ }
if (bctl->completion_data.notify_tag.is_closure) {
/* unrefs bctl->error */
@@ -1611,11 +1617,8 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
validate_filtered_metadata(exec_ctx, bctl);
GPR_TIMER_END("validate_filtered_metadata", 0);
- if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
- 0 &&
- !call->is_client) {
- call->send_deadline =
- gpr_convert_clock_type(md->deadline, GPR_CLOCK_MONOTONIC);
+ if (md->deadline != GRPC_MILLIS_INF_FUTURE && !call->is_client) {
+ call->send_deadline = md->deadline;
}
}
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index c680139cf6..27c2f5243c 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -49,7 +49,7 @@ typedef struct grpc_call_create_args {
grpc_mdelem *add_initial_metadata;
size_t add_initial_metadata_count;
- gpr_timespec send_deadline;
+ grpc_millis send_deadline;
} grpc_call_create_args;
/* Create a new call based on \a args.
diff --git a/src/core/lib/surface/channel.cc b/src/core/lib/surface/channel.cc
index 59fced7bc4..860dcc82db 100644
--- a/src/core/lib/surface/channel.cc
+++ b/src/core/lib/surface/channel.cc
@@ -262,7 +262,7 @@ static grpc_call *grpc_channel_create_call_internal(
grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call,
uint32_t propagation_mask, grpc_completion_queue *cq,
grpc_pollset_set *pollset_set_alternative, grpc_mdelem path_mdelem,
- grpc_mdelem authority_mdelem, gpr_timespec deadline) {
+ grpc_mdelem authority_mdelem, grpc_millis deadline) {
grpc_mdelem send_metadata[2];
size_t num_metadata = 0;
@@ -308,7 +308,7 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
host != NULL ? grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_AUTHORITY,
grpc_slice_ref_internal(*host))
: GRPC_MDNULL,
- deadline);
+ grpc_timespec_to_millis_round_up(deadline));
grpc_exec_ctx_finish(&exec_ctx);
return call;
}
@@ -316,7 +316,7 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
grpc_call *grpc_channel_create_pollset_set_call(
grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call,
uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method,
- const grpc_slice *host, gpr_timespec deadline, void *reserved) {
+ const grpc_slice *host, grpc_millis deadline, void *reserved) {
GPR_ASSERT(!reserved);
return grpc_channel_create_call_internal(
exec_ctx, channel, parent_call, propagation_mask, NULL, pollset_set,
@@ -372,7 +372,8 @@ grpc_call *grpc_channel_create_registered_call(
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call *call = grpc_channel_create_call_internal(
&exec_ctx, channel, parent_call, propagation_mask, completion_queue, NULL,
- GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority), deadline);
+ GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority),
+ grpc_timespec_to_millis_round_up(deadline));
grpc_exec_ctx_finish(&exec_ctx);
return call;
}
diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h
index 427422b565..4d1c7e369f 100644
--- a/src/core/lib/surface/channel.h
+++ b/src/core/lib/surface/channel.h
@@ -47,7 +47,7 @@ grpc_channel *grpc_channel_create_with_builder(
grpc_call *grpc_channel_create_pollset_set_call(
grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call,
uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method,
- const grpc_slice *host, gpr_timespec deadline, void *reserved);
+ const grpc_slice *host, grpc_millis deadline, void *reserved);
/** Get a (borrowed) pointer to this channel's underlying channel stack */
grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel);
diff --git a/src/core/lib/surface/completion_queue.cc b/src/core/lib/surface/completion_queue.cc
index 453646bd49..36b4b835f8 100644
--- a/src/core/lib/surface/completion_queue.cc
+++ b/src/core/lib/surface/completion_queue.cc
@@ -61,8 +61,7 @@ typedef struct {
grpc_error *(*kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker);
grpc_error *(*work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker, gpr_timespec now,
- gpr_timespec deadline);
+ grpc_pollset_worker **worker, grpc_millis deadline);
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure);
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset);
@@ -100,8 +99,7 @@ static void non_polling_poller_destroy(grpc_exec_ctx *exec_ctx,
static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset,
grpc_pollset_worker **worker,
- gpr_timespec now,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
non_polling_poller *npp = (non_polling_poller *)pollset;
if (npp->shutdown) return GRPC_ERROR_NONE;
non_polling_worker w;
@@ -115,7 +113,10 @@ static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
w.next->prev = w.prev->next = &w;
}
w.kicked = false;
- while (!npp->shutdown && !w.kicked && !gpr_cv_wait(&w.cv, &npp->mu, deadline))
+ gpr_timespec deadline_ts =
+ grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME);
+ while (!npp->shutdown && !w.kicked &&
+ !gpr_cv_wait(&w.cv, &npp->mu, deadline_ts))
;
if (&w == npp->root) {
npp->root = w.next;
@@ -743,7 +744,7 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq,
typedef struct {
gpr_atm last_seen_things_queued_ever;
grpc_completion_queue *cq;
- gpr_timespec deadline;
+ grpc_millis deadline;
grpc_cq_completion *stolen_completion;
void *tag; /* for pluck */
bool first_loop;
@@ -772,8 +773,7 @@ static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
return true;
}
}
- return !a->first_loop &&
- gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
+ return !a->first_loop && a->deadline < grpc_exec_ctx_now(exec_ctx);
}
#ifndef NDEBUG
@@ -802,7 +802,6 @@ static void dump_pending_tags(grpc_completion_queue *cq) {}
static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
void *reserved) {
grpc_event ret;
- gpr_timespec now;
cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
@@ -819,23 +818,20 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
dump_pending_tags(cq);
- deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
-
GRPC_CQ_INTERNAL_REF(cq, "next");
+ grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline);
cq_is_finished_arg is_finished_arg = {
-
gpr_atm_no_barrier_load(&cqd->things_queued_ever),
cq,
- deadline,
+ deadline_millis,
NULL,
NULL,
true};
grpc_exec_ctx exec_ctx =
GRPC_EXEC_CTX_INITIALIZER(0, cq_is_next_finished, &is_finished_arg);
-
for (;;) {
- gpr_timespec iteration_deadline = deadline;
+ grpc_millis iteration_deadline = deadline_millis;
if (is_finished_arg.stolen_completion != NULL) {
grpc_cq_completion *c = is_finished_arg.stolen_completion;
@@ -862,7 +858,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
attempt at popping. Not doing this can potentially deadlock this
thread forever (if the deadline is infinity) */
if (cq_event_queue_num_items(&cqd->queue) > 0) {
- iteration_deadline = gpr_time_0(GPR_CLOCK_MONOTONIC);
+ iteration_deadline = 0;
}
}
@@ -883,8 +879,8 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
break;
}
- now = gpr_now(GPR_CLOCK_MONOTONIC);
- if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) {
+ if (!is_finished_arg.first_loop &&
+ grpc_exec_ctx_now(&exec_ctx) >= deadline_millis) {
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cq);
@@ -895,7 +891,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
gpr_mu_lock(cq->mu);
cq->num_polls++;
grpc_error *err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq),
- NULL, now, iteration_deadline);
+ NULL, iteration_deadline);
gpr_mu_unlock(cq->mu);
if (err != GRPC_ERROR_NONE) {
@@ -1032,8 +1028,7 @@ static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
}
gpr_mu_unlock(cq->mu);
}
- return !a->first_loop &&
- gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
+ return !a->first_loop && a->deadline < grpc_exec_ctx_now(exec_ctx);
}
static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
@@ -1042,7 +1037,6 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
grpc_cq_completion *c;
grpc_cq_completion *prev;
grpc_pollset_worker *worker = NULL;
- gpr_timespec now;
cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
@@ -1061,14 +1055,13 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
dump_pending_tags(cq);
- deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
-
GRPC_CQ_INTERNAL_REF(cq, "pluck");
gpr_mu_lock(cq->mu);
+ grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline);
cq_is_finished_arg is_finished_arg = {
gpr_atm_no_barrier_load(&cqd->things_queued_ever),
cq,
- deadline,
+ deadline_millis,
NULL,
tag,
true};
@@ -1120,8 +1113,8 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
dump_pending_tags(cq);
break;
}
- now = gpr_now(GPR_CLOCK_MONOTONIC);
- if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) {
+ if (!is_finished_arg.first_loop &&
+ grpc_exec_ctx_now(&exec_ctx) >= deadline_millis) {
del_plucker(cq, tag, &worker);
gpr_mu_unlock(cq->mu);
memset(&ret, 0, sizeof(ret));
@@ -1129,10 +1122,9 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
dump_pending_tags(cq);
break;
}
-
cq->num_polls++;
grpc_error *err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq),
- &worker, now, deadline);
+ &worker, deadline_millis);
if (err != GRPC_ERROR_NONE) {
del_plucker(cq, tag, &worker);
gpr_mu_unlock(cq->mu);
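The completion-queue paths above now convert the caller-supplied gpr_timespec exactly once and afterwards compare plain grpc_millis values against the exec_ctx's cached clock. A minimal sketch of that pattern, using only helpers already present in this diff (grpc_timespec_to_millis_round_up, grpc_exec_ctx_now); it is illustrative, not code from the patch:

    static bool cq_deadline_passed(grpc_exec_ctx *exec_ctx,
                                   gpr_timespec deadline_ts) {
      /* convert once, up-front, rounding up so we never time out early */
      grpc_millis deadline = grpc_timespec_to_millis_round_up(deadline_ts);
      /* from here on, a deadline check is a single int64 comparison */
      return grpc_exec_ctx_now(exec_ctx) >= deadline;
    }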
diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc
index 6286f9159d..88e26cbeb7 100644
--- a/src/core/lib/surface/lame_client.cc
+++ b/src/core/lib/surface/lame_client.cc
@@ -74,7 +74,7 @@ static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
mdb->list.head = &calld->status;
mdb->list.tail = &calld->details;
mdb->list.count = 2;
- mdb->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ mdb->deadline = GRPC_MILLIS_INF_FUTURE;
}
static void lame_start_transport_stream_op_batch(
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index 1d0fd472d0..dd09cb91de 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -137,7 +137,7 @@ struct call_data {
bool host_set;
grpc_slice path;
grpc_slice host;
- gpr_timespec deadline;
+ grpc_millis deadline;
grpc_completion_queue *cq_new;
@@ -492,11 +492,13 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
GPR_ASSERT(calld->path_set);
rc->data.batch.details->host = grpc_slice_ref_internal(calld->host);
rc->data.batch.details->method = grpc_slice_ref_internal(calld->path);
- rc->data.batch.details->deadline = calld->deadline;
+ rc->data.batch.details->deadline =
+ grpc_millis_to_timespec(calld->deadline, GPR_CLOCK_MONOTONIC);
rc->data.batch.details->flags = calld->recv_initial_metadata_flags;
break;
case REGISTERED_CALL:
- *rc->data.registered.deadline = calld->deadline;
+ *rc->data.registered.deadline =
+ grpc_millis_to_timespec(calld->deadline, GPR_CLOCK_MONOTONIC);
if (rc->data.registered.optional_payload) {
*rc->data.registered.optional_payload = calld->payload;
calld->payload = NULL;
@@ -739,7 +741,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)ptr;
call_data *calld = (call_data *)elem->call_data;
- gpr_timespec op_deadline;
+ grpc_millis op_deadline;
if (error == GRPC_ERROR_NONE) {
GPR_ASSERT(calld->recv_initial_metadata->idx.named.path != NULL);
@@ -759,7 +761,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
GRPC_ERROR_REF(error);
}
op_deadline = calld->recv_initial_metadata->deadline;
- if (0 != gpr_time_cmp(op_deadline, gpr_inf_future(op_deadline.clock_type))) {
+ if (op_deadline != GRPC_MILLIS_INF_FUTURE) {
calld->deadline = op_deadline;
}
if (calld->host_set && calld->path_set) {
@@ -833,7 +835,7 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
memset(&args, 0, sizeof(args));
args.channel = chand->channel;
args.server_transport_data = transport_server_data;
- args.send_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ args.send_deadline = GRPC_MILLIS_INF_FUTURE;
grpc_call *call;
grpc_error *error = grpc_call_create(exec_ctx, &args, &call);
grpc_call_element *elem =
@@ -881,7 +883,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
memset(calld, 0, sizeof(call_data));
- calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ calld->deadline = GRPC_MILLIS_INF_FUTURE;
calld->call = grpc_call_from_top_element(elem);
gpr_mu_init(&calld->mu_state);
diff --git a/src/core/lib/transport/bdp_estimator.cc b/src/core/lib/transport/bdp_estimator.cc
index e7fa0eefe8..6ed427ce5c 100644
--- a/src/core/lib/transport/bdp_estimator.cc
+++ b/src/core/lib/transport/bdp_estimator.cc
@@ -30,8 +30,12 @@ grpc_tracer_flag grpc_bdp_estimator_trace =
void grpc_bdp_estimator_init(grpc_bdp_estimator *estimator, const char *name) {
estimator->estimate = 65536;
estimator->ping_state = GRPC_BDP_PING_UNSCHEDULED;
+ estimator->ping_start_time = gpr_time_0(GPR_CLOCK_MONOTONIC);
+ estimator->next_ping_scheduled = 0;
estimator->name = name;
estimator->bw_est = 0;
+ estimator->inter_ping_delay = 100.0; // start at 100ms
+ estimator->stable_estimate_count = 0;
}
bool grpc_bdp_estimator_get_estimate(const grpc_bdp_estimator *estimator,
@@ -51,10 +55,11 @@ void grpc_bdp_estimator_add_incoming_bytes(grpc_bdp_estimator *estimator,
estimator->accumulator += num_bytes;
}
-bool grpc_bdp_estimator_need_ping(const grpc_bdp_estimator *estimator) {
+bool grpc_bdp_estimator_need_ping(grpc_exec_ctx *exec_ctx,
+ const grpc_bdp_estimator *estimator) {
switch (estimator->ping_state) {
case GRPC_BDP_PING_UNSCHEDULED:
- return true;
+ return grpc_exec_ctx_now(exec_ctx) >= estimator->next_ping_scheduled;
case GRPC_BDP_PING_SCHEDULED:
return false;
case GRPC_BDP_PING_STARTED:
@@ -84,11 +89,13 @@ void grpc_bdp_estimator_start_ping(grpc_bdp_estimator *estimator) {
estimator->ping_start_time = gpr_now(GPR_CLOCK_MONOTONIC);
}
-void grpc_bdp_estimator_complete_ping(grpc_bdp_estimator *estimator) {
- gpr_timespec dt_ts =
- gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), estimator->ping_start_time);
+void grpc_bdp_estimator_complete_ping(grpc_exec_ctx *exec_ctx,
+ grpc_bdp_estimator *estimator) {
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec dt_ts = gpr_time_sub(now, estimator->ping_start_time);
double dt = (double)dt_ts.tv_sec + 1e-9 * (double)dt_ts.tv_nsec;
double bw = dt > 0 ? ((double)estimator->accumulator / dt) : 0;
+ int start_inter_ping_delay = estimator->inter_ping_delay;
if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG, "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
" dt=%lf bw=%lfMbs bw_est=%lfMbs",
@@ -105,7 +112,26 @@ void grpc_bdp_estimator_complete_ping(grpc_bdp_estimator *estimator) {
gpr_log(GPR_DEBUG, "bdp[%s]: estimate increased to %" PRId64,
estimator->name, estimator->estimate);
}
+ estimator->inter_ping_delay /= 2; // if the ping estimate changes,
+ // exponentially get faster at probing
+ } else if (estimator->inter_ping_delay < 10000) {
+ estimator->stable_estimate_count++;
+ if (estimator->stable_estimate_count >= 2) {
+ estimator->inter_ping_delay +=
+ 100 +
+ (int)(rand() * 100.0 / RAND_MAX); // if the ping estimate is steady,
+ // slowly back off the probe frequency
+ }
+ }
+ if (start_inter_ping_delay != estimator->inter_ping_delay) {
+ estimator->stable_estimate_count = 0;
+ if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+ gpr_log(GPR_DEBUG, "bdp[%s]:update_inter_time to %dms", estimator->name,
+ estimator->inter_ping_delay);
+ }
}
estimator->ping_state = GRPC_BDP_PING_UNSCHEDULED;
estimator->accumulator = 0;
+ estimator->next_ping_scheduled =
+ grpc_exec_ctx_now(exec_ctx) + estimator->inter_ping_delay;
}
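The net effect of the new fields is an adaptive probe interval: halve the delay whenever a ping moves the estimate, and slowly stretch it (up to 10 seconds) once the estimate has been stable for a couple of pings. A simplified sketch of that policy, with hypothetical names and without the random jitter; not code from the patch:

    static void update_ping_delay(int *delay_ms, int *stable_count,
                                  bool estimate_changed) {
      if (estimate_changed) {
        *delay_ms /= 2;   /* estimate still moving: probe more aggressively */
        *stable_count = 0;
      } else if (*delay_ms < 10000 && ++*stable_count >= 2) {
        *delay_ms += 100; /* estimate stable: back off the probing frequency */
      }
    }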
diff --git a/src/core/lib/transport/bdp_estimator.h b/src/core/lib/transport/bdp_estimator.h
index 21c27ec6af..480d5237b8 100644
--- a/src/core/lib/transport/bdp_estimator.h
+++ b/src/core/lib/transport/bdp_estimator.h
@@ -23,6 +23,7 @@
#include <stdbool.h>
#include <stdint.h>
#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
#define GRPC_BDP_SAMPLES 16
#define GRPC_BDP_MIN_SAMPLES_FOR_ESTIMATE 3
@@ -43,7 +44,12 @@ typedef struct grpc_bdp_estimator {
grpc_bdp_estimator_ping_state ping_state;
int64_t accumulator;
int64_t estimate;
+ // when was the current ping started?
gpr_timespec ping_start_time;
+ // when should the next ping start?
+ grpc_millis next_ping_scheduled;
+ int inter_ping_delay;
+ int stable_estimate_count;
double bw_est;
const char *name;
} grpc_bdp_estimator;
@@ -59,7 +65,8 @@ bool grpc_bdp_estimator_get_bw(const grpc_bdp_estimator *estimator, double *bw);
void grpc_bdp_estimator_add_incoming_bytes(grpc_bdp_estimator *estimator,
int64_t num_bytes);
// Returns true if the user should schedule a ping
-bool grpc_bdp_estimator_need_ping(const grpc_bdp_estimator *estimator);
+bool grpc_bdp_estimator_need_ping(grpc_exec_ctx *exec_ctx,
+ const grpc_bdp_estimator *estimator);
// Schedule a ping: call in response to receiving a true from
// grpc_bdp_estimator_need_ping once a ping has been scheduled by a
// transport (but not necessarily started)
@@ -68,7 +75,8 @@ void grpc_bdp_estimator_schedule_ping(grpc_bdp_estimator *estimator);
// the ping is on the wire
void grpc_bdp_estimator_start_ping(grpc_bdp_estimator *estimator);
// Completes a previously started ping
-void grpc_bdp_estimator_complete_ping(grpc_bdp_estimator *estimator);
+void grpc_bdp_estimator_complete_ping(grpc_exec_ctx *exec_ctx,
+ grpc_bdp_estimator *estimator);
#ifdef __cplusplus
}
diff --git a/src/core/lib/transport/error_utils.cc b/src/core/lib/transport/error_utils.cc
index 5e3920b627..2e3b61b7ab 100644
--- a/src/core/lib/transport/error_utils.cc
+++ b/src/core/lib/transport/error_utils.cc
@@ -39,8 +39,9 @@ static grpc_error *recursively_find_error_with_field(grpc_error *error,
return NULL;
}
-void grpc_error_get_status(grpc_error *error, gpr_timespec deadline,
- grpc_status_code *code, grpc_slice *slice,
+void grpc_error_get_status(grpc_exec_ctx *exec_ctx, grpc_error *error,
+ grpc_millis deadline, grpc_status_code *code,
+ grpc_slice *slice,
grpc_http2_error_code *http_error) {
// Start with the parent error and recurse through the tree of children
// until we find the first one that has a status code.
@@ -63,8 +64,8 @@ void grpc_error_get_status(grpc_error *error, gpr_timespec deadline,
status = (grpc_status_code)integer;
} else if (grpc_error_get_int(found_error, GRPC_ERROR_INT_HTTP2_ERROR,
&integer)) {
- status = grpc_http2_error_to_grpc_status((grpc_http2_error_code)integer,
- deadline);
+ status = grpc_http2_error_to_grpc_status(
+ exec_ctx, (grpc_http2_error_code)integer, deadline);
}
if (code != NULL) *code = status;
diff --git a/src/core/lib/transport/error_utils.h b/src/core/lib/transport/error_utils.h
index 18ff54839c..2c97f9f0bc 100644
--- a/src/core/lib/transport/error_utils.h
+++ b/src/core/lib/transport/error_utils.h
@@ -20,6 +20,7 @@
#define GRPC_CORE_LIB_TRANSPORT_ERROR_UTILS_H
#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/transport/http2_errors.h"
#ifdef __cplusplus
@@ -32,8 +33,9 @@ extern "C" {
/// All attributes are pulled from the same child error. If any of the
/// attributes (code, msg, http_status) are unneeded, they can be passed as
/// NULL.
-void grpc_error_get_status(grpc_error *error, gpr_timespec deadline,
- grpc_status_code *code, grpc_slice *slice,
+void grpc_error_get_status(grpc_exec_ctx *exec_ctx, grpc_error *error,
+ grpc_millis deadline, grpc_status_code *code,
+ grpc_slice *slice,
grpc_http2_error_code *http_status);
/// A utility function to check whether there is a clear status code that
diff --git a/src/core/lib/transport/metadata_batch.cc b/src/core/lib/transport/metadata_batch.cc
index 54388bdcda..2df9c9189c 100644
--- a/src/core/lib/transport/metadata_batch.cc
+++ b/src/core/lib/transport/metadata_batch.cc
@@ -74,7 +74,7 @@ void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) {
void grpc_metadata_batch_init(grpc_metadata_batch *batch) {
memset(batch, 0, sizeof(*batch));
- batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ batch->deadline = GRPC_MILLIS_INF_FUTURE;
}
void grpc_metadata_batch_destroy(grpc_exec_ctx *exec_ctx,
@@ -270,9 +270,7 @@ void grpc_metadata_batch_clear(grpc_exec_ctx *exec_ctx,
}
bool grpc_metadata_batch_is_empty(grpc_metadata_batch *batch) {
- return batch->list.head == NULL &&
- gpr_time_cmp(gpr_inf_future(batch->deadline.clock_type),
- batch->deadline) == 0;
+ return batch->list.head == NULL && batch->deadline == GRPC_MILLIS_INF_FUTURE;
}
size_t grpc_metadata_batch_size(grpc_metadata_batch *batch) {
diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h
index 63f30a78d1..a2b4b92385 100644
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
@@ -51,9 +51,9 @@ typedef struct grpc_metadata_batch {
grpc_mdelem_list list;
grpc_metadata_batch_callouts idx;
/** Used to calculate grpc-timeout at the point of sending,
- or gpr_inf_future if this batch does not need to send a
+ or GRPC_MILLIS_INF_FUTURE if this batch does not need to send a
grpc-timeout */
- gpr_timespec deadline;
+ grpc_millis deadline;
} grpc_metadata_batch;
void grpc_metadata_batch_init(grpc_metadata_batch *batch);
diff --git a/src/core/lib/transport/status_conversion.cc b/src/core/lib/transport/status_conversion.cc
index a40d333284..891c4427d7 100644
--- a/src/core/lib/transport/status_conversion.cc
+++ b/src/core/lib/transport/status_conversion.cc
@@ -37,8 +37,9 @@ grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status) {
}
}
-grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error,
- gpr_timespec deadline) {
+grpc_status_code grpc_http2_error_to_grpc_status(grpc_exec_ctx *exec_ctx,
+ grpc_http2_error_code error,
+ grpc_millis deadline) {
switch (error) {
case GRPC_HTTP2_NO_ERROR:
/* should never be received */
@@ -46,7 +47,7 @@ grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error,
case GRPC_HTTP2_CANCEL:
/* http2 cancel translates to STATUS_CANCELLED iff deadline hasn't been
* exceeded */
- return gpr_time_cmp(gpr_now(deadline.clock_type), deadline) >= 0
+ return grpc_exec_ctx_now(exec_ctx) > deadline
? GRPC_STATUS_DEADLINE_EXCEEDED
: GRPC_STATUS_CANCELLED;
case GRPC_HTTP2_ENHANCE_YOUR_CALM:
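With the deadline now expressed as grpc_millis, the CANCEL mapping depends on the exec_ctx clock rather than a fresh gpr_now() call. A hypothetical usage sketch with assumed values, not part of the patch:

    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_millis deadline = grpc_exec_ctx_now(&exec_ctx) + 100;
    /* deadline still in the future: a peer CANCEL surfaces as CANCELLED */
    GPR_ASSERT(grpc_http2_error_to_grpc_status(&exec_ctx, GRPC_HTTP2_CANCEL,
                                               deadline) ==
               GRPC_STATUS_CANCELLED);
    /* once grpc_exec_ctx_now() passes the deadline, the same error maps to
       GRPC_STATUS_DEADLINE_EXCEEDED */
    grpc_exec_ctx_finish(&exec_ctx);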
diff --git a/src/core/lib/transport/status_conversion.h b/src/core/lib/transport/status_conversion.h
index b257998e4d..fd58a82cb7 100644
--- a/src/core/lib/transport/status_conversion.h
+++ b/src/core/lib/transport/status_conversion.h
@@ -20,6 +20,7 @@
#define GRPC_CORE_LIB_TRANSPORT_STATUS_CONVERSION_H
#include <grpc/grpc.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/transport/http2_errors.h"
#ifdef __cplusplus
@@ -28,8 +29,9 @@ extern "C" {
/* Conversion of grpc status codes to http2 error codes (for RST_STREAM) */
grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status);
-grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error,
- gpr_timespec deadline);
+grpc_status_code grpc_http2_error_to_grpc_status(grpc_exec_ctx *exec_ctx,
+ grpc_http2_error_code error,
+ grpc_millis deadline);
/* Conversion of HTTP status codes (:status) to grpc status codes */
grpc_status_code grpc_http2_status_to_grpc_status(int status);
diff --git a/src/core/lib/transport/timeout_encoding.cc b/src/core/lib/transport/timeout_encoding.cc
index 02f179d6a3..23a9ef308f 100644
--- a/src/core/lib/transport/timeout_encoding.cc
+++ b/src/core/lib/transport/timeout_encoding.cc
@@ -59,60 +59,27 @@ static void enc_seconds(char *buffer, int64_t sec) {
}
}
-static void enc_nanos(char *buffer, int64_t x) {
+static void enc_millis(char *buffer, int64_t x) {
x = round_up_to_three_sig_figs(x);
- if (x < 100000) {
- if (x % 1000 == 0) {
- enc_ext(buffer, x / 1000, 'u');
- } else {
- enc_ext(buffer, x, 'n');
- }
- } else if (x < 100000000) {
- if (x % 1000000 == 0) {
- enc_ext(buffer, x / 1000000, 'm');
- } else {
- enc_ext(buffer, x / 1000, 'u');
- }
- } else if (x < 1000000000) {
- enc_ext(buffer, x / 1000000, 'm');
+ if (x < GPR_MS_PER_SEC) {
+ enc_ext(buffer, x, 'm');
} else {
- /* note that this is only ever called with times of less than one second,
- so if we reach here the time must have been rounded up to a whole second
- (and no more) */
- memcpy(buffer, "1S", 3);
- }
-}
-
-static void enc_micros(char *buffer, int64_t x) {
- x = round_up_to_three_sig_figs(x);
- if (x < 100000) {
- if (x % 1000 == 0) {
- enc_ext(buffer, x / 1000, 'm');
+ if (x % GPR_MS_PER_SEC == 0) {
+ enc_seconds(buffer, x / GPR_MS_PER_SEC);
} else {
- enc_ext(buffer, x, 'u');
+ enc_ext(buffer, x, 'm');
}
- } else if (x < 100000000) {
- if (x % 1000000 == 0) {
- enc_ext(buffer, x / 1000000, 'S');
- } else {
- enc_ext(buffer, x / 1000, 'm');
- }
- } else {
- enc_ext(buffer, x / 1000000, 'S');
}
}
-void grpc_http2_encode_timeout(gpr_timespec timeout, char *buffer) {
- if (timeout.tv_sec < 0) {
+void grpc_http2_encode_timeout(grpc_millis timeout, char *buffer) {
+ if (timeout <= 0) {
enc_tiny(buffer);
- } else if (timeout.tv_sec == 0) {
- enc_nanos(buffer, timeout.tv_nsec);
- } else if (timeout.tv_sec < 1000 && timeout.tv_nsec != 0) {
- enc_micros(buffer,
- (int64_t)(timeout.tv_sec * 1000000) +
- (timeout.tv_nsec / 1000 + (timeout.tv_nsec % 1000 != 0)));
+ } else if (timeout < 1000 * GPR_MS_PER_SEC) {
+ enc_millis(buffer, timeout);
} else {
- enc_seconds(buffer, timeout.tv_sec + (timeout.tv_nsec != 0));
+ enc_seconds(buffer,
+ timeout / GPR_MS_PER_SEC + (timeout % GPR_MS_PER_SEC != 0));
}
}
@@ -121,8 +88,8 @@ static int is_all_whitespace(const char *p, const char *end) {
return p == end;
}
-int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout) {
- int32_t x = 0;
+int grpc_http2_decode_timeout(grpc_slice text, grpc_millis *timeout) {
+ grpc_millis x = 0;
const uint8_t *p = GRPC_SLICE_START_PTR(text);
const uint8_t *end = GRPC_SLICE_END_PTR(text);
int have_digit = 0;
@@ -136,7 +103,7 @@ int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout) {
/* spec allows max. 8 digits, but we allow values up to 1,000,000,000 */
if (x >= (100 * 1000 * 1000)) {
if (x != (100 * 1000 * 1000) || digit != 0) {
- *timeout = gpr_inf_future(GPR_TIMESPAN);
+ *timeout = GRPC_MILLIS_INF_FUTURE;
return 1;
}
}
@@ -150,22 +117,22 @@ int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout) {
/* decode unit specifier */
switch (*p) {
case 'n':
- *timeout = gpr_time_from_nanos(x, GPR_TIMESPAN);
+ *timeout = x / GPR_NS_PER_MS + (x % GPR_NS_PER_MS != 0);
break;
case 'u':
- *timeout = gpr_time_from_micros(x, GPR_TIMESPAN);
+ *timeout = x / GPR_US_PER_MS + (x % GPR_US_PER_MS != 0);
break;
case 'm':
- *timeout = gpr_time_from_millis(x, GPR_TIMESPAN);
+ *timeout = x;
break;
case 'S':
- *timeout = gpr_time_from_seconds(x, GPR_TIMESPAN);
+ *timeout = x * GPR_MS_PER_SEC;
break;
case 'M':
- *timeout = gpr_time_from_minutes(x, GPR_TIMESPAN);
+ *timeout = x * 60 * GPR_MS_PER_SEC;
break;
case 'H':
- *timeout = gpr_time_from_hours(x, GPR_TIMESPAN);
+ *timeout = x * 60 * 60 * GPR_MS_PER_SEC;
break;
default:
return 0;
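Since grpc_millis has millisecond granularity, decoding now rounds any sub-millisecond unit up, so a non-zero timeout never collapses to zero. A small hypothetical usage example with values chosen for illustration:

    grpc_millis t;
    /* "500u" (500 microseconds) rounds up to 1 millisecond */
    GPR_ASSERT(grpc_http2_decode_timeout(
        grpc_slice_from_static_string("500u"), &t) && t == 1);
    /* "2S" decodes to 2000 milliseconds */
    GPR_ASSERT(grpc_http2_decode_timeout(
        grpc_slice_from_static_string("2S"), &t) && t == 2000);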
diff --git a/src/core/lib/transport/timeout_encoding.h b/src/core/lib/transport/timeout_encoding.h
index 1f4e206f8a..25cb663959 100644
--- a/src/core/lib/transport/timeout_encoding.h
+++ b/src/core/lib/transport/timeout_encoding.h
@@ -22,6 +22,7 @@
#include <grpc/slice.h>
#include <grpc/support/time.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/string.h"
#define GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE (GPR_LTOA_MIN_BUFSIZE + 1)
@@ -32,8 +33,8 @@ extern "C" {
/* Encode/decode timeouts to the GRPC over HTTP/2 format;
encoding may round up arbitrarily */
-void grpc_http2_encode_timeout(gpr_timespec timeout, char *buffer);
-int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout);
+void grpc_http2_encode_timeout(grpc_millis timeout, char *buffer);
+int grpc_http2_decode_timeout(grpc_slice text, grpc_millis *timeout);
#ifdef __cplusplus
}
diff --git a/src/core/lib/transport/transport_op_string.cc b/src/core/lib/transport/transport_op_string.cc
index 87fdf72e29..cc11b0cc49 100644
--- a/src/core/lib/transport/transport_op_string.cc
+++ b/src/core/lib/transport/transport_op_string.cc
@@ -51,10 +51,9 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
if (m != md.list.head) gpr_strvec_add(b, gpr_strdup(", "));
put_metadata(b, m->md);
}
- if (gpr_time_cmp(md.deadline, gpr_inf_future(md.deadline.clock_type)) != 0) {
+ if (md.deadline != GRPC_MILLIS_INF_FUTURE) {
char *tmp;
- gpr_asprintf(&tmp, " deadline=%" PRId64 ".%09d", md.deadline.tv_sec,
- md.deadline.tv_nsec);
+ gpr_asprintf(&tmp, " deadline=%" PRIdPTR, md.deadline);
gpr_strvec_add(b, tmp);
}
}
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index 7b9fd6424d..140f4ceee1 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -21,7 +21,6 @@ CORE_SOURCE_FILES = [
'src/core/lib/support/arena.cc',
'src/core/lib/support/atm.cc',
'src/core/lib/support/avl.cc',
- 'src/core/lib/support/backoff.cc',
'src/core/lib/support/cmdline.cc',
'src/core/lib/support/cpu_iphone.cc',
'src/core/lib/support/cpu_linux.cc',
@@ -62,6 +61,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/support/tmpfile_windows.cc',
'src/core/lib/support/wrap_memcpy.cc',
'src/core/lib/surface/init.cc',
+ 'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
diff --git a/test/core/backoff/BUILD b/test/core/backoff/BUILD
new file mode 100644
index 0000000000..4ae762007c
--- /dev/null
+++ b/test/core/backoff/BUILD
@@ -0,0 +1,36 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_cc_binary")
+
+licenses(["notice"]) # Apache v2
+
+package(
+ features = [
+ "-layering_check",
+ "-parse_headers",
+ ],
+)
+
+grpc_cc_test(
+ name = "backoff_test",
+ srcs = ["backoff_test.c"],
+ language = "C",
+ deps = [
+ "//:grpc",
+ "//test/core/util:grpc_test_util",
+ "//:gpr",
+ "//test/core/util:gpr_test_util",
+ ],
+)
diff --git a/test/core/backoff/backoff_test.c b/test/core/backoff/backoff_test.c
new file mode 100644
index 0000000000..3848b2a54d
--- /dev/null
+++ b/test/core/backoff/backoff_test.c
@@ -0,0 +1,149 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/backoff/backoff.h"
+
+#include <grpc/support/log.h>
+
+#include "test/core/util/test_config.h"
+
+static void test_constant_backoff(void) {
+ grpc_backoff backoff;
+ grpc_backoff_init(&backoff, 200 /* initial timeout */, 1.0 /* multiplier */,
+ 0.0 /* jitter */, 100 /* min timeout */,
+ 1000 /* max timeout */);
+
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_millis next = grpc_backoff_begin(&exec_ctx, &backoff);
+ GPR_ASSERT(next - grpc_exec_ctx_now(&exec_ctx) == 200);
+ for (int i = 0; i < 10000; i++) {
+ next = grpc_backoff_step(&exec_ctx, &backoff);
+ GPR_ASSERT(next - grpc_exec_ctx_now(&exec_ctx) == 200);
+ exec_ctx.now = next;
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void test_min_connect(void) {
+ grpc_backoff backoff;
+ grpc_backoff_init(&backoff, 100 /* initial timeout */, 1.0 /* multiplier */,
+ 0.0 /* jitter */, 200 /* min timeout */,
+ 1000 /* max timeout */);
+
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_millis next = grpc_backoff_begin(&exec_ctx, &backoff);
+ GPR_ASSERT(next - grpc_exec_ctx_now(&exec_ctx) == 200);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void test_no_jitter_backoff(void) {
+ grpc_backoff backoff;
+ grpc_backoff_init(&backoff, 2 /* initial timeout */, 2.0 /* multiplier */,
+ 0.0 /* jitter */, 1 /* min timeout */,
+ 513 /* max timeout */);
+ // x_1 = 2
+ // x_n = 2**n + x_{n-1} ( = 2**(n+1) - 2 )
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ exec_ctx.now = 0;
+ exec_ctx.now_is_valid = true;
+ grpc_millis next = grpc_backoff_begin(&exec_ctx, &backoff);
+ GPR_ASSERT(next == 2);
+ exec_ctx.now = next;
+ next = grpc_backoff_step(&exec_ctx, &backoff);
+ GPR_ASSERT(next == 6);
+ exec_ctx.now = next;
+ next = grpc_backoff_step(&exec_ctx, &backoff);
+ GPR_ASSERT(next == 14);
+ exec_ctx.now = next;
+ next = grpc_backoff_step(&exec_ctx, &backoff);
+ GPR_ASSERT(next == 30);
+ exec_ctx.now = next;
+ next = grpc_backoff_step(&exec_ctx, &backoff);
+ GPR_ASSERT(next == 62);
+ exec_ctx.now = next;
+ next = grpc_backoff_step(&exec_ctx, &backoff);
+ GPR_ASSERT(next == 126);
+ exec_ctx.now = next;
+ next = grpc_backoff_step(&exec_ctx, &backoff);
+ GPR_ASSERT(next == 254);
+ exec_ctx.now = next;
+ next = grpc_backoff_step(&exec_ctx, &backoff);
+ GPR_ASSERT(next == 510);
+ exec_ctx.now = next;
+ next = grpc_backoff_step(&exec_ctx, &backoff);
+ GPR_ASSERT(next == 1022);
+ exec_ctx.now = next;
+ next = grpc_backoff_step(&exec_ctx, &backoff);
+ // Hit the maximum timeout. From this point onwards, each retry deadline
+ // advances by exactly the max timeout (513).
+ GPR_ASSERT(next == 1535);
+ exec_ctx.now = next;
+ next = grpc_backoff_step(&exec_ctx, &backoff);
+ GPR_ASSERT(next == 2048);
+ exec_ctx.now = next;
+ next = grpc_backoff_step(&exec_ctx, &backoff);
+ GPR_ASSERT(next == 2561);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void test_jitter_backoff(void) {
+ const int64_t initial_timeout = 500;
+ const double jitter = 0.1;
+ grpc_backoff backoff;
+ grpc_backoff_init(&backoff, initial_timeout, 1.0 /* multiplier */, jitter,
+ 100 /* min timeout */, 1000 /* max timeout */);
+
+ backoff.rng_state = 0; // force consistent PRNG
+
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_millis next = grpc_backoff_begin(&exec_ctx, &backoff);
+ GPR_ASSERT(next - grpc_exec_ctx_now(&exec_ctx) == 500);
+
+ int64_t expected_next_lower_bound =
+ (int64_t)((double)initial_timeout * (1 - jitter));
+ int64_t expected_next_upper_bound =
+ (int64_t)((double)initial_timeout * (1 + jitter));
+
+ for (int i = 0; i < 10000; i++) {
+ next = grpc_backoff_step(&exec_ctx, &backoff);
+
+ // next-now must be within (jitter*100)% of the previous timeout.
+ const int64_t timeout_millis = next - grpc_exec_ctx_now(&exec_ctx);
+ GPR_ASSERT(timeout_millis >= expected_next_lower_bound);
+ GPR_ASSERT(timeout_millis <= expected_next_upper_bound);
+
+ expected_next_lower_bound =
+ (int64_t)((double)timeout_millis * (1 - jitter));
+ expected_next_upper_bound =
+ (int64_t)((double)timeout_millis * (1 + jitter));
+ exec_ctx.now = next;
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+int main(int argc, char **argv) {
+ grpc_test_init(argc, argv);
+ gpr_time_init();
+
+ test_constant_backoff();
+ test_min_connect();
+ test_no_jitter_backoff();
+ test_jitter_backoff();
+
+ return 0;
+}
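For the no-jitter case above: with an initial timeout of 2 and a multiplier of 2, the deadline returned after n steps is 2 + 4 + ... + 2^n = 2^(n+1) - 2, which gives the asserted sequence 2, 6, 14, ..., 1022. Once the 513 ms cap is reached, each further deadline advances by exactly 513 (1022 -> 1535 -> 2048 -> 2561).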
diff --git a/test/core/bad_client/bad_client.c b/test/core/bad_client/bad_client.c
index fff0c793ed..b7b28a9ac4 100644
--- a/test/core/bad_client/bad_client.c
+++ b/test/core/bad_client/bad_client.c
@@ -84,13 +84,18 @@ void grpc_run_bad_client_test(
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_completion_queue *shutdown_cq;
- hex = gpr_dump(client_payload, client_payload_length,
- GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ if (client_payload_length < 4 * 1024) {
+ hex = gpr_dump(client_payload, client_payload_length,
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
- /* Add a debug log */
- gpr_log(GPR_INFO, "TEST: %s", hex);
+ /* Add a debug log */
+ gpr_log(GPR_INFO, "TEST: %s", hex);
- gpr_free(hex);
+ gpr_free(hex);
+ } else {
+ gpr_log(GPR_INFO, "TEST: (%" PRIdPTR " byte long string)",
+ client_payload_length);
+ }
/* Init grpc */
grpc_init();
diff --git a/test/core/bad_client/tests/window_overflow.c b/test/core/bad_client/tests/window_overflow.c
index 18c647ad8a..e4b5f9711b 100644
--- a/test/core/bad_client/tests/window_overflow.c
+++ b/test/core/bad_client/tests/window_overflow.c
@@ -69,7 +69,7 @@ int main(int argc, char **argv) {
#define MAX_FRAME_SIZE 16384
#define MESSAGES_PER_FRAME (MAX_FRAME_SIZE / 5)
#define FRAME_SIZE (MESSAGES_PER_FRAME * 5)
-#define SEND_SIZE (100 * 1024)
+#define SEND_SIZE (6 * 1024 * 1024)
#define NUM_FRAMES (SEND_SIZE / FRAME_SIZE + 1)
grpc_test_init(argc, argv);
diff --git a/test/core/channel/channel_stack_test.c b/test/core/channel/channel_stack_test.c
index 7c3614b4a2..a07ef89ba8 100644
--- a/test/core/channel/channel_stack_test.c
+++ b/test/core/channel/channel_stack_test.c
@@ -125,7 +125,7 @@ static void test_create_channel_stack(void) {
.context = NULL,
.path = path,
.start_time = gpr_now(GPR_CLOCK_MONOTONIC),
- .deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC),
+ .deadline = GRPC_MILLIS_INF_FUTURE,
.arena = NULL};
grpc_error *error = grpc_call_stack_init(&exec_ctx, channel_stack, 1,
free_call, call_stack, &args);
diff --git a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c
index 364e180963..4597285063 100644
--- a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c
+++ b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c
@@ -106,7 +106,7 @@ static bool wait_loop(int deadline_seconds, gpr_event *ev) {
deadline_seconds--;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_timer_check(&exec_ctx, gpr_now(GPR_CLOCK_MONOTONIC), NULL);
+ grpc_timer_check(&exec_ctx, NULL);
grpc_exec_ctx_finish(&exec_ctx);
}
return false;
diff --git a/test/core/end2end/fixtures/h2_ssl_cert.c b/test/core/end2end/fixtures/h2_ssl_cert.c
index 9b1ddadfe4..f0a2ee5430 100644
--- a/test/core/end2end/fixtures/h2_ssl_cert.c
+++ b/test/core/end2end/fixtures/h2_ssl_cert.c
@@ -319,7 +319,7 @@ static void simple_request_body(grpc_end2end_test_fixture f,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
- op->flags = 0;
+ op->flags = GRPC_INITIAL_METADATA_WAIT_FOR_READY;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
diff --git a/test/core/end2end/fixtures/http_proxy_fixture.c b/test/core/end2end/fixtures/http_proxy_fixture.c
index a4cfc77bcb..d29401fdc3 100644
--- a/test/core/end2end/fixtures/http_proxy_fixture.c
+++ b/test/core/end2end/fixtures/http_proxy_fixture.c
@@ -412,8 +412,8 @@ static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg,
GPR_ASSERT(resolved_addresses->naddrs >= 1);
// Connect to requested address.
// The connection callback inherits our reference to conn.
- const gpr_timespec deadline = gpr_time_add(
- gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(10, GPR_TIMESPAN));
+ const grpc_millis deadline =
+ grpc_exec_ctx_now(exec_ctx) + 10 * GPR_MS_PER_SEC;
grpc_tcp_client_connect(exec_ctx, &conn->on_server_connect_done,
&conn->server_endpoint, conn->pollset_set, NULL,
&resolved_addresses->addrs[0], deadline);
@@ -469,14 +469,12 @@ static void thread_main(void* arg) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
do {
gpr_ref(&proxy->users);
- const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- const gpr_timespec deadline =
- gpr_time_add(now, gpr_time_from_seconds(1, GPR_TIMESPAN));
grpc_pollset_worker* worker = NULL;
gpr_mu_lock(proxy->mu);
GRPC_LOG_IF_ERROR(
"grpc_pollset_work",
- grpc_pollset_work(&exec_ctx, proxy->pollset, &worker, now, deadline));
+ grpc_pollset_work(&exec_ctx, proxy->pollset, &worker,
+ grpc_exec_ctx_now(&exec_ctx) + GPR_MS_PER_SEC));
gpr_mu_unlock(proxy->mu);
grpc_exec_ctx_flush(&exec_ctx);
} while (!gpr_unref(&proxy->users));
diff --git a/test/core/end2end/fuzzers/api_fuzzer.c b/test/core/end2end/fuzzers/api_fuzzer.c
index 1228c9fe9a..0a787bbf30 100644
--- a/test/core/end2end/fuzzers/api_fuzzer.c
+++ b/test/core/end2end/fuzzers/api_fuzzer.c
@@ -60,7 +60,9 @@ extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
static gpr_timespec now_impl(gpr_clock_type clock_type) {
GPR_ASSERT(clock_type != GPR_TIMESPAN);
- return g_now;
+ gpr_timespec ts = g_now;
+ ts.clock_type = clock_type;
+ return ts;
}
////////////////////////////////////////////////////////////////////////////////
@@ -407,10 +409,8 @@ void my_resolve_address(grpc_exec_ctx *exec_ctx, const char *addr,
r->addrs = addresses;
r->lb_addrs = NULL;
grpc_timer_init(
- exec_ctx, &r->timer, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_seconds(1, GPR_TIMESPAN)),
- GRPC_CLOSURE_CREATE(finish_resolve, r, grpc_schedule_on_exec_ctx),
- gpr_now(GPR_CLOCK_MONOTONIC));
+ exec_ctx, &r->timer, GPR_MS_PER_SEC + grpc_exec_ctx_now(exec_ctx),
+ GRPC_CLOSURE_CREATE(finish_resolve, r, grpc_schedule_on_exec_ctx));
}
grpc_ares_request *my_dns_lookup_ares(
@@ -424,10 +424,8 @@ grpc_ares_request *my_dns_lookup_ares(
r->addrs = NULL;
r->lb_addrs = lb_addrs;
grpc_timer_init(
- exec_ctx, &r->timer, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_seconds(1, GPR_TIMESPAN)),
- GRPC_CLOSURE_CREATE(finish_resolve, r, grpc_schedule_on_exec_ctx),
- gpr_now(GPR_CLOCK_MONOTONIC));
+ exec_ctx, &r->timer, GPR_MS_PER_SEC + grpc_exec_ctx_now(exec_ctx),
+ GRPC_CLOSURE_CREATE(finish_resolve, r, grpc_schedule_on_exec_ctx));
return NULL;
}
@@ -487,10 +485,8 @@ static void sched_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
fc->ep = ep;
fc->deadline = deadline;
grpc_timer_init(
- exec_ctx, &fc->timer, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_millis(1, GPR_TIMESPAN)),
- GRPC_CLOSURE_CREATE(do_connect, fc, grpc_schedule_on_exec_ctx),
- gpr_now(GPR_CLOCK_MONOTONIC));
+ exec_ctx, &fc->timer, GPR_MS_PER_SEC + grpc_exec_ctx_now(exec_ctx),
+ GRPC_CLOSURE_CREATE(do_connect, fc, grpc_schedule_on_exec_ctx));
}
static void my_tcp_client_connect(grpc_exec_ctx *exec_ctx,
diff --git a/test/core/end2end/invalid_call_argument_test.c b/test/core/end2end/invalid_call_argument_test.c
index bf0d08adec..e3fd5a8fbe 100644
--- a/test/core/end2end/invalid_call_argument_test.c
+++ b/test/core/end2end/invalid_call_argument_test.c
@@ -92,7 +92,7 @@ static void prepare_test(int is_client) {
op = g_state.ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
- op->flags = 0;
+ op->flags = GRPC_INITIAL_METADATA_WAIT_FOR_READY;
op->reserved = NULL;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(g_state.call, g_state.ops,
diff --git a/test/core/end2end/tests/bad_ping.c b/test/core/end2end/tests/bad_ping.c
index c97d11b306..d442f12480 100644
--- a/test/core/end2end/tests/bad_ping.c
+++ b/test/core/end2end/tests/bad_ping.c
@@ -155,14 +155,16 @@ static void test_bad_ping(grpc_end2end_test_config config) {
cq_verify(cqv);
// Send too many pings to the server to trigger the punishment:
- // The first ping is sent after data frames, it won't trigger a ping strike.
- // Each of the following pings will trigger a ping strike, and we need at
- // least (MAX_PING_STRIKES + 1) strikes to trigger the punishment. So
- // (MAX_PING_STRIKES + 2) pings are needed here.
+ // Each ping will trigger a ping strike, and we need more than
+ // MAX_PING_STRIKES strikes to trigger the punishment. So
+ // (MAX_PING_STRIKES + 1) pings are needed here.
int i;
- for (i = 200; i < 202 + MAX_PING_STRIKES; i++) {
- grpc_channel_ping(f.client, f.cq, tag(i), NULL);
- CQ_EXPECT_COMPLETION(cqv, tag(i), 1);
+ for (i = 1; i <= MAX_PING_STRIKES + 1; i++) {
+ grpc_channel_ping(f.client, f.cq, tag(200 + i), NULL);
+ CQ_EXPECT_COMPLETION(cqv, tag(200 + i), 1);
+ if (i == MAX_PING_STRIKES + 1) {
+ CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
+ }
cq_verify(cqv);
}
@@ -190,7 +192,6 @@ static void test_bad_ping(grpc_end2end_test_config config) {
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
- CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
grpc_server_shutdown_and_notify(f.server, f.cq, tag(0xdead));
diff --git a/test/core/end2end/tests/keepalive_timeout.c b/test/core/end2end/tests/keepalive_timeout.c
index 8d01f23c00..c4280149c7 100644
--- a/test/core/end2end/tests/keepalive_timeout.c
+++ b/test/core/end2end/tests/keepalive_timeout.c
@@ -98,21 +98,21 @@ static void test_keepalive_timeout(grpc_end2end_test_config config) {
grpc_byte_buffer *response_payload =
grpc_raw_byte_buffer_create(&response_payload_slice, 1);
- grpc_arg keepalive_args[] = {{.type = GRPC_ARG_INTEGER,
- .key = GRPC_ARG_KEEPALIVE_TIME_MS,
- .value.integer = 1500},
- {.type = GRPC_ARG_INTEGER,
- .key = GRPC_ARG_KEEPALIVE_TIMEOUT_MS,
- .value.integer = 0},
- {.type = GRPC_ARG_INTEGER,
- .key = GRPC_ARG_HTTP2_BDP_PROBE,
- .value.integer = 0}};
-
- grpc_channel_args client_args = {.num_args = GPR_ARRAY_SIZE(keepalive_args),
- .args = keepalive_args};
+ grpc_arg keepalive_arg_elems[] = {{.type = GRPC_ARG_INTEGER,
+ .key = GRPC_ARG_KEEPALIVE_TIME_MS,
+ .value.integer = 1500},
+ {.type = GRPC_ARG_INTEGER,
+ .key = GRPC_ARG_KEEPALIVE_TIMEOUT_MS,
+ .value.integer = 0},
+ {.type = GRPC_ARG_INTEGER,
+ .key = GRPC_ARG_HTTP2_BDP_PROBE,
+ .value.integer = 0}};
+ grpc_channel_args keepalive_args = {
+ .num_args = GPR_ARRAY_SIZE(keepalive_arg_elems),
+ .args = keepalive_arg_elems};
grpc_end2end_test_fixture f =
- begin_test(config, "keepalive_timeout", &client_args, NULL);
+ begin_test(config, "keepalive_timeout", &keepalive_args, NULL);
cq_verifier *cqv = cq_verifier_create(f.cq);
grpc_op ops[6];
grpc_op *op;
diff --git a/test/core/http/httpcli_test.c b/test/core/http/httpcli_test.c
index 8a53903763..cc1c16d695 100644
--- a/test/core/http/httpcli_test.c
+++ b/test/core/http/httpcli_test.c
@@ -35,8 +35,9 @@ static grpc_httpcli_context g_context;
static gpr_mu *g_mu;
static grpc_polling_entity g_pops;
-static gpr_timespec n_seconds_time(int seconds) {
- return grpc_timeout_seconds_to_deadline(seconds);
+static grpc_millis n_seconds_time(int seconds) {
+ return grpc_timespec_to_millis_round_up(
+ grpc_timeout_seconds_to_deadline(seconds));
}
static void on_finish(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@@ -86,8 +87,7 @@ static void test_get(int port) {
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
- &worker, gpr_now(GPR_CLOCK_MONOTONIC),
- n_seconds_time(1))));
+ &worker, n_seconds_time(1))));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
@@ -128,8 +128,7 @@ static void test_post(int port) {
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
- &worker, gpr_now(GPR_CLOCK_MONOTONIC),
- n_seconds_time(1))));
+ &worker, n_seconds_time(1))));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
diff --git a/test/core/http/httpscli_test.c b/test/core/http/httpscli_test.c
index c7455bd8df..f8a3cfdd76 100644
--- a/test/core/http/httpscli_test.c
+++ b/test/core/http/httpscli_test.c
@@ -35,8 +35,9 @@ static grpc_httpcli_context g_context;
static gpr_mu *g_mu;
static grpc_polling_entity g_pops;
-static gpr_timespec n_seconds_time(int seconds) {
- return grpc_timeout_seconds_to_deadline(seconds);
+static grpc_millis n_seconds_time(int seconds) {
+ return grpc_timespec_to_millis_round_up(
+ grpc_timeout_seconds_to_deadline(seconds));
}
static void on_finish(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@@ -87,8 +88,7 @@ static void test_get(int port) {
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
- &worker, gpr_now(GPR_CLOCK_MONOTONIC),
- n_seconds_time(1))));
+ &worker, n_seconds_time(1))));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
@@ -130,8 +130,7 @@ static void test_post(int port) {
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
- &worker, gpr_now(GPR_CLOCK_MONOTONIC),
- n_seconds_time(1))));
+ &worker, n_seconds_time(1))));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
diff --git a/test/core/iomgr/endpoint_tests.c b/test/core/iomgr/endpoint_tests.c
index f8570edde7..61e901f645 100644
--- a/test/core/iomgr/endpoint_tests.c
+++ b/test/core/iomgr/endpoint_tests.c
@@ -176,10 +176,11 @@ static void read_and_write_test(grpc_endpoint_test_config config,
size_t num_bytes, size_t write_size,
size_t slice_size, bool shutdown) {
struct read_and_write_test_state state;
- gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
grpc_endpoint_test_fixture f =
begin_test(config, "read_and_write_test", slice_size);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_millis deadline =
+ grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20));
gpr_log(GPR_DEBUG, "num_bytes=%" PRIuPTR " write_size=%" PRIuPTR
" slice_size=%" PRIuPTR " shutdown=%d",
num_bytes, write_size, slice_size, shutdown);
@@ -235,11 +236,10 @@ static void read_and_write_test(grpc_endpoint_test_config config,
gpr_mu_lock(g_mu);
while (!state.read_done || !state.write_done) {
grpc_pollset_worker *worker = NULL;
- GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0);
+ GPR_ASSERT(grpc_exec_ctx_now(&exec_ctx) < deadline);
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
- grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
+ grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
}
gpr_mu_unlock(g_mu);
grpc_exec_ctx_flush(&exec_ctx);
@@ -265,14 +265,14 @@ static void wait_for_fail_count(grpc_exec_ctx *exec_ctx, int *fail_count,
int want_fail_count) {
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(g_mu);
- gpr_timespec deadline = grpc_timeout_seconds_to_deadline(10);
- while (gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0 &&
+ grpc_millis deadline =
+ grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10));
+ while (grpc_exec_ctx_now(exec_ctx) < deadline &&
*fail_count < want_fail_count) {
grpc_pollset_worker *worker = NULL;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
- grpc_pollset_work(exec_ctx, g_pollset, &worker,
- gpr_now(deadline.clock_type), deadline)));
+ grpc_pollset_work(exec_ctx, g_pollset, &worker, deadline)));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(g_mu);
diff --git a/test/core/iomgr/ev_epollsig_linux_test.c b/test/core/iomgr/ev_epollsig_linux_test.c
index cca07bf002..37aadacd49 100644
--- a/test/core/iomgr/ev_epollsig_linux_test.c
+++ b/test/core/iomgr/ev_epollsig_linux_test.c
@@ -238,10 +238,8 @@ static void test_threading_loop(void *arg) {
grpc_pollset_worker *worker;
gpr_mu_lock(shared->mu);
GPR_ASSERT(GRPC_LOG_IF_ERROR(
- "pollset_work",
- grpc_pollset_work(&exec_ctx, shared->pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_inf_future(GPR_CLOCK_MONOTONIC))));
+ "pollset_work", grpc_pollset_work(&exec_ctx, shared->pollset, &worker,
+ GRPC_MILLIS_INF_FUTURE)));
gpr_mu_unlock(shared->mu);
grpc_exec_ctx_finish(&exec_ctx);
}
diff --git a/test/core/iomgr/fd_posix_test.c b/test/core/iomgr/fd_posix_test.c
index 881277a8d6..1c62f34d3e 100644
--- a/test/core/iomgr/fd_posix_test.c
+++ b/test/core/iomgr/fd_posix_test.c
@@ -252,10 +252,8 @@ static void server_wait_and_shutdown(server *sv) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset_worker *worker = NULL;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
- "pollset_work",
- grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_inf_future(GPR_CLOCK_MONOTONIC))));
+ "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker,
+ GRPC_MILLIS_INF_FUTURE)));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
@@ -371,10 +369,8 @@ static void client_wait_and_shutdown(client *cl) {
grpc_pollset_worker *worker = NULL;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
- "pollset_work",
- grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_inf_future(GPR_CLOCK_MONOTONIC))));
+ "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker,
+ GRPC_MILLIS_INF_FUTURE)));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
@@ -477,10 +473,8 @@ static void test_grpc_fd_change(void) {
while (a.cb_that_ran == NULL) {
grpc_pollset_worker *worker = NULL;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
- "pollset_work",
- grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_inf_future(GPR_CLOCK_MONOTONIC))));
+ "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker,
+ GRPC_MILLIS_INF_FUTURE)));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
@@ -503,10 +497,8 @@ static void test_grpc_fd_change(void) {
while (b.cb_that_ran == NULL) {
grpc_pollset_worker *worker = NULL;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
- "pollset_work",
- grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_inf_future(GPR_CLOCK_MONOTONIC))));
+ "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker,
+ GRPC_MILLIS_INF_FUTURE)));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
diff --git a/test/core/iomgr/pollset_set_test.c b/test/core/iomgr/pollset_set_test.c
index 5750ac0f4b..3dd4bc887c 100644
--- a/test/core/iomgr/pollset_set_test.c
+++ b/test/core/iomgr/pollset_set_test.c
@@ -203,7 +203,7 @@ static void pollset_set_test_basic() {
*/
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset_worker *worker;
- gpr_timespec deadline;
+ grpc_millis deadline;
test_fd tfds[10];
test_pollset pollsets[3];
@@ -256,10 +256,10 @@ static void pollset_set_test_basic() {
make_test_fds_readable(tfds, num_fds);
gpr_mu_lock(pollsets[i].mu);
- deadline = grpc_timeout_milliseconds_to_deadline(2);
+ deadline = grpc_timespec_to_millis_round_up(
+ grpc_timeout_milliseconds_to_deadline(2));
GPR_ASSERT(GRPC_ERROR_NONE ==
- grpc_pollset_work(&exec_ctx, pollsets[i].ps, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC), deadline));
+ grpc_pollset_work(&exec_ctx, pollsets[i].ps, &worker, deadline));
gpr_mu_unlock(pollsets[i].mu);
grpc_exec_ctx_flush(&exec_ctx);
@@ -308,7 +308,7 @@ void pollset_set_test_dup_fds() {
*/
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset_worker *worker;
- gpr_timespec deadline;
+ grpc_millis deadline;
test_fd tfds[3];
test_pollset pollset;
@@ -338,10 +338,10 @@ void pollset_set_test_dup_fds() {
make_test_fds_readable(tfds, num_fds);
gpr_mu_lock(pollset.mu);
- deadline = grpc_timeout_milliseconds_to_deadline(2);
+ deadline = grpc_timespec_to_millis_round_up(
+ grpc_timeout_milliseconds_to_deadline(2));
GPR_ASSERT(GRPC_ERROR_NONE ==
- grpc_pollset_work(&exec_ctx, pollset.ps, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC), deadline));
+ grpc_pollset_work(&exec_ctx, pollset.ps, &worker, deadline));
gpr_mu_unlock(pollset.mu);
grpc_exec_ctx_flush(&exec_ctx);
@@ -381,7 +381,7 @@ void pollset_set_test_empty_pollset() {
*/
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset_worker *worker;
- gpr_timespec deadline;
+ grpc_millis deadline;
test_fd tfds[3];
test_pollset pollsets[2];
@@ -407,10 +407,10 @@ void pollset_set_test_empty_pollset() {
make_test_fds_readable(tfds, num_fds);
gpr_mu_lock(pollsets[0].mu);
- deadline = grpc_timeout_milliseconds_to_deadline(2);
+ deadline = grpc_timespec_to_millis_round_up(
+ grpc_timeout_milliseconds_to_deadline(2));
GPR_ASSERT(GRPC_ERROR_NONE ==
- grpc_pollset_work(&exec_ctx, pollsets[0].ps, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC), deadline));
+ grpc_pollset_work(&exec_ctx, pollsets[0].ps, &worker, deadline));
gpr_mu_unlock(pollsets[0].mu);
grpc_exec_ctx_flush(&exec_ctx);
diff --git a/test/core/iomgr/resolve_address_posix_test.c b/test/core/iomgr/resolve_address_posix_test.c
index e4be99f03c..cb9d6080fb 100644
--- a/test/core/iomgr/resolve_address_posix_test.c
+++ b/test/core/iomgr/resolve_address_posix_test.c
@@ -72,35 +72,33 @@ void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) {
gpr_free(args->pollset);
}
-static gpr_timespec n_sec_deadline(int seconds) {
- return gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_seconds(seconds, GPR_TIMESPAN));
+static grpc_millis n_sec_deadline(int seconds) {
+ return grpc_timespec_to_millis_round_up(
+ grpc_timeout_seconds_to_deadline(seconds));
}
static void actually_poll(void *argsp) {
args_struct *args = argsp;
- gpr_timespec deadline = n_sec_deadline(10);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_millis deadline = n_sec_deadline(10);
while (true) {
bool done = gpr_atm_acq_load(&args->done_atm) != 0;
if (done) {
break;
}
- gpr_timespec time_left =
- gpr_time_sub(deadline, gpr_now(GPR_CLOCK_REALTIME));
- gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64 ".%09d", done,
- time_left.tv_sec, time_left.tv_nsec);
- GPR_ASSERT(gpr_time_cmp(time_left, gpr_time_0(GPR_TIMESPAN)) >= 0);
+ grpc_millis time_left = deadline - grpc_exec_ctx_now(&exec_ctx);
+ gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRIdPTR, done, time_left);
+ GPR_ASSERT(time_left >= 0);
grpc_pollset_worker *worker = NULL;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(args->mu);
- GRPC_LOG_IF_ERROR(
- "pollset_work",
- grpc_pollset_work(&exec_ctx, args->pollset, &worker,
- gpr_now(GPR_CLOCK_REALTIME), n_sec_deadline(1)));
+ GRPC_LOG_IF_ERROR("pollset_work",
+ grpc_pollset_work(&exec_ctx, args->pollset, &worker,
+ n_sec_deadline(1)));
gpr_mu_unlock(args->mu);
- grpc_exec_ctx_finish(&exec_ctx);
+ grpc_exec_ctx_flush(&exec_ctx);
}
gpr_event_set(&args->ev, (void *)1);
+ grpc_exec_ctx_finish(&exec_ctx);
}
static void poll_pollset_until_request_done(args_struct *args) {
diff --git a/test/core/iomgr/resolve_address_test.c b/test/core/iomgr/resolve_address_test.c
index 1110c04b6e..178bbbb95f 100644
--- a/test/core/iomgr/resolve_address_test.c
+++ b/test/core/iomgr/resolve_address_test.c
@@ -68,34 +68,32 @@ void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) {
gpr_free(args->pollset);
}
-static gpr_timespec n_sec_deadline(int seconds) {
- return gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_seconds(seconds, GPR_TIMESPAN));
+static grpc_millis n_sec_deadline(int seconds) {
+ return grpc_timespec_to_millis_round_up(
+ grpc_timeout_seconds_to_deadline(seconds));
}
static void poll_pollset_until_request_done(args_struct *args) {
- gpr_timespec deadline = n_sec_deadline(10);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_millis deadline = n_sec_deadline(10);
while (true) {
bool done = gpr_atm_acq_load(&args->done_atm) != 0;
if (done) {
break;
}
- gpr_timespec time_left =
- gpr_time_sub(deadline, gpr_now(GPR_CLOCK_REALTIME));
- gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64 ".%09d", done,
- time_left.tv_sec, time_left.tv_nsec);
- GPR_ASSERT(gpr_time_cmp(time_left, gpr_time_0(GPR_TIMESPAN)) >= 0);
+ grpc_millis time_left = deadline - grpc_exec_ctx_now(&exec_ctx);
+ gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRIdPTR, done, time_left);
+ GPR_ASSERT(time_left >= 0);
grpc_pollset_worker *worker = NULL;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(args->mu);
- GRPC_LOG_IF_ERROR(
- "pollset_work",
- grpc_pollset_work(&exec_ctx, args->pollset, &worker,
- gpr_now(GPR_CLOCK_REALTIME), n_sec_deadline(1)));
+ GRPC_LOG_IF_ERROR("pollset_work",
+ grpc_pollset_work(&exec_ctx, args->pollset, &worker,
+ n_sec_deadline(1)));
gpr_mu_unlock(args->mu);
- grpc_exec_ctx_finish(&exec_ctx);
+ grpc_exec_ctx_flush(&exec_ctx);
}
gpr_event_set(&args->ev, (void *)1);
+ grpc_exec_ctx_finish(&exec_ctx);
}
static void must_succeed(grpc_exec_ctx *exec_ctx, void *argsp,
diff --git a/test/core/iomgr/tcp_client_posix_test.c b/test/core/iomgr/tcp_client_posix_test.c
index 1032da942b..b8b76d1c42 100644
--- a/test/core/iomgr/tcp_client_posix_test.c
+++ b/test/core/iomgr/tcp_client_posix_test.c
@@ -46,8 +46,8 @@ static grpc_pollset *g_pollset;
static int g_connections_complete = 0;
static grpc_endpoint *g_connecting = NULL;
-static gpr_timespec test_deadline(void) {
- return grpc_timeout_seconds_to_deadline(10);
+static grpc_millis test_deadline(void) {
+ return grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10));
}
static void finish_connection() {
@@ -109,7 +109,7 @@ void test_succeeds(void) {
(socklen_t *)&resolved_addr.len) == 0);
GRPC_CLOSURE_INIT(&done, must_succeed, NULL, grpc_schedule_on_exec_ctx);
grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL,
- &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME));
+ &resolved_addr, GRPC_MILLIS_INF_FUTURE);
/* await the connection */
do {
@@ -127,8 +127,8 @@ void test_succeeds(void) {
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC),
- grpc_timeout_seconds_to_deadline(5))));
+ grpc_timespec_to_millis_round_up(
+ grpc_timeout_seconds_to_deadline(5)))));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(g_mu);
@@ -159,25 +159,24 @@ void test_fails(void) {
/* connect to a broken address */
GRPC_CLOSURE_INIT(&done, must_fail, NULL, grpc_schedule_on_exec_ctx);
grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL,
- &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME));
+ &resolved_addr, GRPC_MILLIS_INF_FUTURE);
gpr_mu_lock(g_mu);
/* wait for the connection callback to finish */
while (g_connections_complete == connections_complete_before) {
grpc_pollset_worker *worker = NULL;
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec polling_deadline = test_deadline();
- switch (grpc_timer_check(&exec_ctx, now, &polling_deadline)) {
+ grpc_millis polling_deadline = test_deadline();
+ switch (grpc_timer_check(&exec_ctx, &polling_deadline)) {
case GRPC_TIMERS_FIRED:
break;
case GRPC_TIMERS_NOT_CHECKED:
- polling_deadline = now;
+ polling_deadline = 0;
/* fall through */
case GRPC_TIMERS_CHECKED_AND_EMPTY:
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- now, polling_deadline)));
+ polling_deadline)));
break;
}
gpr_mu_unlock(g_mu);
diff --git a/test/core/iomgr/tcp_posix_test.c b/test/core/iomgr/tcp_posix_test.c
index cfb3cf897c..6501160c6f 100644
--- a/test/core/iomgr/tcp_posix_test.c
+++ b/test/core/iomgr/tcp_posix_test.c
@@ -162,7 +162,8 @@ static void read_test(size_t num_bytes, size_t slice_size) {
grpc_endpoint *ep;
struct read_socket_state state;
size_t written_bytes;
- gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
+ grpc_millis deadline =
+ grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_log(GPR_INFO, "Read test of size %" PRIuPTR ", slice size %" PRIuPTR,
@@ -194,8 +195,7 @@ static void read_test(size_t num_bytes, size_t slice_size) {
grpc_pollset_worker *worker = NULL;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
- grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
+ grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
@@ -215,7 +215,8 @@ static void large_read_test(size_t slice_size) {
grpc_endpoint *ep;
struct read_socket_state state;
ssize_t written_bytes;
- gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
+ grpc_millis deadline =
+ grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_log(GPR_INFO, "Start large read test, slice size %" PRIuPTR, slice_size);
@@ -246,8 +247,7 @@ static void large_read_test(size_t slice_size) {
grpc_pollset_worker *worker = NULL;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
- grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
+ grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
@@ -319,8 +319,8 @@ void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) {
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC),
- grpc_timeout_milliseconds_to_deadline(10))));
+ grpc_timespec_to_millis_round_up(
+ grpc_timeout_milliseconds_to_deadline(10)))));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_finish(&exec_ctx);
do {
@@ -353,7 +353,8 @@ static void write_test(size_t num_bytes, size_t slice_size) {
uint8_t current_data = 0;
grpc_slice_buffer outgoing;
grpc_closure write_done_closure;
- gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
+ grpc_millis deadline =
+ grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_log(GPR_INFO,
@@ -390,8 +391,7 @@ static void write_test(size_t num_bytes, size_t slice_size) {
}
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
- grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
+ grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
@@ -419,7 +419,8 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
struct read_socket_state state;
size_t written_bytes;
int fd;
- gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
+ grpc_millis deadline =
+ grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_closure fd_released_cb;
int fd_released_done = 0;
@@ -457,8 +458,7 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
grpc_pollset_worker *worker = NULL;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
- grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
+ grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
gpr_log(GPR_DEBUG, "wakeup: read=%" PRIdPTR " target=%" PRIdPTR,
state.read_bytes, state.target_read_bytes);
gpr_mu_unlock(g_mu);
@@ -476,8 +476,7 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
grpc_pollset_worker *worker = NULL;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
- grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
+ grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
gpr_log(GPR_DEBUG, "wakeup: fd_released_done=%d", fd_released_done);
}
gpr_mu_unlock(g_mu);
diff --git a/test/core/iomgr/tcp_server_posix_test.c b/test/core/iomgr/tcp_server_posix_test.c
index 4d84608376..782dfb413a 100644
--- a/test/core/iomgr/tcp_server_posix_test.c
+++ b/test/core/iomgr/tcp_server_posix_test.c
@@ -230,7 +230,8 @@ static void test_no_op_with_port_and_start(void) {
static grpc_error *tcp_connect(grpc_exec_ctx *exec_ctx, const test_addr *remote,
on_connect_result *result) {
- gpr_timespec deadline = grpc_timeout_seconds_to_deadline(10);
+ grpc_millis deadline =
+ grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10));
int clifd;
int nconnects_before;
const struct sockaddr *remote_addr =
@@ -253,11 +254,10 @@ static grpc_error *tcp_connect(grpc_exec_ctx *exec_ctx, const test_addr *remote,
}
gpr_log(GPR_DEBUG, "wait");
while (g_nconnects == nconnects_before &&
- gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) {
+ deadline > grpc_exec_ctx_now(exec_ctx)) {
grpc_pollset_worker *worker = NULL;
grpc_error *err;
- if ((err = grpc_pollset_work(exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC), deadline)) !=
+ if ((err = grpc_pollset_work(exec_ctx, g_pollset, &worker, deadline)) !=
GRPC_ERROR_NONE) {
gpr_mu_unlock(g_mu);
close(clifd);
diff --git a/test/core/iomgr/timer_list_test.c b/test/core/iomgr/timer_list_test.c
index 5f8b01fdc4..c3d9f9d88d 100644
--- a/test/core/iomgr/timer_list_test.c
+++ b/test/core/iomgr/timer_list_test.c
@@ -41,51 +41,45 @@ static void cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
static void add_test(void) {
- gpr_timespec start = gpr_now(GPR_CLOCK_REALTIME);
int i;
grpc_timer timers[20];
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_log(GPR_INFO, "add_test");
- grpc_timer_list_init(start);
+ grpc_timer_list_init(&exec_ctx);
grpc_timer_trace.value = 1;
grpc_timer_check_trace.value = 1;
memset(cb_called, 0, sizeof(cb_called));
+ grpc_millis start = grpc_exec_ctx_now(&exec_ctx);
+
/* 10 ms timers. will expire in the current epoch */
for (i = 0; i < 10; i++) {
- grpc_timer_init(
- &exec_ctx, &timers[i],
- gpr_time_add(start, gpr_time_from_millis(10, GPR_TIMESPAN)),
- GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)i, grpc_schedule_on_exec_ctx),
- start);
+ grpc_timer_init(&exec_ctx, &timers[i], start + 10,
+ GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)i,
+ grpc_schedule_on_exec_ctx));
}
/* 1010 ms timers. will expire in the next epoch */
for (i = 10; i < 20; i++) {
- grpc_timer_init(
- &exec_ctx, &timers[i],
- gpr_time_add(start, gpr_time_from_millis(1010, GPR_TIMESPAN)),
- GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)i, grpc_schedule_on_exec_ctx),
- start);
+ grpc_timer_init(&exec_ctx, &timers[i], start + 1010,
+ GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)i,
+ grpc_schedule_on_exec_ctx));
}
/* collect timers. Only the first batch should be ready. */
- GPR_ASSERT(grpc_timer_check(
- &exec_ctx,
- gpr_time_add(start, gpr_time_from_millis(500, GPR_TIMESPAN)),
- NULL) == GRPC_TIMERS_FIRED);
+ exec_ctx.now = start + 500;
+ GPR_ASSERT(grpc_timer_check(&exec_ctx, NULL) == GRPC_TIMERS_FIRED);
grpc_exec_ctx_finish(&exec_ctx);
for (i = 0; i < 20; i++) {
GPR_ASSERT(cb_called[i][1] == (i < 10));
GPR_ASSERT(cb_called[i][0] == 0);
}
- GPR_ASSERT(grpc_timer_check(
- &exec_ctx,
- gpr_time_add(start, gpr_time_from_millis(600, GPR_TIMESPAN)),
- NULL) == GRPC_TIMERS_CHECKED_AND_EMPTY);
+ exec_ctx.now = start + 600;
+ GPR_ASSERT(grpc_timer_check(&exec_ctx, NULL) ==
+ GRPC_TIMERS_CHECKED_AND_EMPTY);
grpc_exec_ctx_finish(&exec_ctx);
for (i = 0; i < 30; i++) {
GPR_ASSERT(cb_called[i][1] == (i < 10));
@@ -93,20 +87,17 @@ static void add_test(void) {
}
/* collect the rest of the timers */
- GPR_ASSERT(grpc_timer_check(
- &exec_ctx,
- gpr_time_add(start, gpr_time_from_millis(1500, GPR_TIMESPAN)),
- NULL) == GRPC_TIMERS_FIRED);
+ exec_ctx.now = start + 1500;
+ GPR_ASSERT(grpc_timer_check(&exec_ctx, NULL) == GRPC_TIMERS_FIRED);
grpc_exec_ctx_finish(&exec_ctx);
for (i = 0; i < 30; i++) {
GPR_ASSERT(cb_called[i][1] == (i < 20));
GPR_ASSERT(cb_called[i][0] == 0);
}
- GPR_ASSERT(grpc_timer_check(
- &exec_ctx,
- gpr_time_add(start, gpr_time_from_millis(1600, GPR_TIMESPAN)),
- NULL) == GRPC_TIMERS_CHECKED_AND_EMPTY);
+ exec_ctx.now = start + 1600;
+ GPR_ASSERT(grpc_timer_check(&exec_ctx, NULL) ==
+ GRPC_TIMERS_CHECKED_AND_EMPTY);
for (i = 0; i < 30; i++) {
GPR_ASSERT(cb_called[i][1] == (i < 20));
GPR_ASSERT(cb_called[i][0] == 0);
@@ -116,10 +107,6 @@ static void add_test(void) {
grpc_exec_ctx_finish(&exec_ctx);
}
-static gpr_timespec tfm(int m) {
- return gpr_time_from_millis(m, GPR_CLOCK_REALTIME);
-}
-
/* Cleaning up a list with pending timers. */
void destruction_test(void) {
grpc_timer timers[5];
@@ -127,32 +114,30 @@ void destruction_test(void) {
gpr_log(GPR_INFO, "destruction_test");
- grpc_timer_list_init(gpr_time_0(GPR_CLOCK_REALTIME));
+ exec_ctx.now_is_valid = true;
+ exec_ctx.now = 0;
+ grpc_timer_list_init(&exec_ctx);
grpc_timer_trace.value = 1;
grpc_timer_check_trace.value = 1;
memset(cb_called, 0, sizeof(cb_called));
grpc_timer_init(
- &exec_ctx, &timers[0], tfm(100),
- GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)0, grpc_schedule_on_exec_ctx),
- gpr_time_0(GPR_CLOCK_REALTIME));
+ &exec_ctx, &timers[0], 100,
+ GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)0, grpc_schedule_on_exec_ctx));
grpc_timer_init(
- &exec_ctx, &timers[1], tfm(3),
- GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)1, grpc_schedule_on_exec_ctx),
- gpr_time_0(GPR_CLOCK_REALTIME));
+ &exec_ctx, &timers[1], 3,
+ GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)1, grpc_schedule_on_exec_ctx));
grpc_timer_init(
- &exec_ctx, &timers[2], tfm(100),
- GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)2, grpc_schedule_on_exec_ctx),
- gpr_time_0(GPR_CLOCK_REALTIME));
+ &exec_ctx, &timers[2], 100,
+ GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)2, grpc_schedule_on_exec_ctx));
grpc_timer_init(
- &exec_ctx, &timers[3], tfm(3),
- GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)3, grpc_schedule_on_exec_ctx),
- gpr_time_0(GPR_CLOCK_REALTIME));
+ &exec_ctx, &timers[3], 3,
+ GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)3, grpc_schedule_on_exec_ctx));
grpc_timer_init(
- &exec_ctx, &timers[4], tfm(1),
- GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)4, grpc_schedule_on_exec_ctx),
- gpr_time_0(GPR_CLOCK_REALTIME));
- GPR_ASSERT(grpc_timer_check(&exec_ctx, tfm(2), NULL) == GRPC_TIMERS_FIRED);
+ &exec_ctx, &timers[4], 1,
+ GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)4, grpc_schedule_on_exec_ctx));
+ exec_ctx.now = 2;
+ GPR_ASSERT(grpc_timer_check(&exec_ctx, NULL) == GRPC_TIMERS_FIRED);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(1 == cb_called[4][1]);
grpc_timer_cancel(&exec_ctx, &timers[0]);
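The timer_list changes above condense to a small pattern: grpc_timer_init now takes an absolute grpc_millis deadline and drops the trailing 'now' argument, and the test advances time by writing exec_ctx.now directly before calling grpc_timer_check. A minimal sketch mirroring add_test, where cb is the test's recording callback:

  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_timer_list_init(&exec_ctx);
  grpc_millis start = grpc_exec_ctx_now(&exec_ctx);
  grpc_timer timer;
  /* fires 10ms after start; the deadline is absolute, in milliseconds */
  grpc_timer_init(&exec_ctx, &timer, start + 10,
                  GRPC_CLOSURE_CREATE(cb, (void *)(intptr_t)0,
                                      grpc_schedule_on_exec_ctx));
  exec_ctx.now = start + 20; /* fast-forward the cached clock */
  GPR_ASSERT(grpc_timer_check(&exec_ctx, NULL) == GRPC_TIMERS_FIRED);
  grpc_exec_ctx_finish(&exec_ctx);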
diff --git a/test/core/iomgr/udp_server_test.c b/test/core/iomgr/udp_server_test.c
index 1d051bea62..2e44d0abc8 100644
--- a/test/core/iomgr/udp_server_test.c
+++ b/test/core/iomgr/udp_server_test.c
@@ -226,7 +226,7 @@ static void test_receive(int number_of_clients) {
grpc_udp_server *s = grpc_udp_server_create(NULL);
int i;
int number_of_reads_before;
- gpr_timespec deadline;
+ grpc_millis deadline;
grpc_pollset *pollsets[1];
LOG_TEST("test_receive");
gpr_log(GPR_INFO, "clients=%d", number_of_clients);
@@ -252,7 +252,8 @@ static void test_receive(int number_of_clients) {
gpr_mu_lock(g_mu);
for (i = 0; i < number_of_clients; i++) {
- deadline = grpc_timeout_seconds_to_deadline(10);
+ deadline =
+ grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10));
number_of_reads_before = g_number_of_reads;
/* Create a socket, send a packet to the UDP server. */
@@ -262,14 +263,13 @@ static void test_receive(int number_of_clients) {
(socklen_t)resolved_addr.len) == 0);
GPR_ASSERT(5 == write(clifd, "hello", 5));
while (g_number_of_reads == number_of_reads_before &&
- gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) {
+ deadline > grpc_exec_ctx_now(&exec_ctx)) {
grpc_pollset_worker *worker = NULL;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
- grpc_pollset_work(&exec_ctx, g_pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
+ grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
gpr_mu_unlock(g_mu);
- grpc_exec_ctx_finish(&exec_ctx);
+ grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(g_mu);
}
GPR_ASSERT(g_number_of_reads == number_of_reads_before + 1);
diff --git a/test/core/security/credentials_test.c b/test/core/security/credentials_test.c
index 5ac58070c8..34f310142c 100644
--- a/test/core/security/credentials_test.c
+++ b/test/core/security/credentials_test.c
@@ -194,14 +194,13 @@ static void test_add_abunch_to_md_array(void) {
static void test_oauth2_token_fetcher_creds_parsing_ok(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_mdelem token_md = GRPC_MDNULL;
- gpr_timespec token_lifetime;
+ grpc_millis token_lifetime;
grpc_httpcli_response response =
http_response(200, valid_oauth2_json_response);
GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response(
&exec_ctx, &response, &token_md, &token_lifetime) ==
GRPC_CREDENTIALS_OK);
- GPR_ASSERT(token_lifetime.tv_sec == 3599);
- GPR_ASSERT(token_lifetime.tv_nsec == 0);
+ GPR_ASSERT(token_lifetime == 3599 * GPR_MS_PER_SEC);
GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDKEY(token_md), "authorization") == 0);
GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDVALUE(token_md),
"Bearer ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_") ==
@@ -214,7 +213,7 @@ static void test_oauth2_token_fetcher_creds_parsing_ok(void) {
static void test_oauth2_token_fetcher_creds_parsing_bad_http_status(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_mdelem token_md = GRPC_MDNULL;
- gpr_timespec token_lifetime;
+ grpc_millis token_lifetime;
grpc_httpcli_response response =
http_response(401, valid_oauth2_json_response);
GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response(
@@ -227,7 +226,7 @@ static void test_oauth2_token_fetcher_creds_parsing_bad_http_status(void) {
static void test_oauth2_token_fetcher_creds_parsing_empty_http_body(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_mdelem token_md = GRPC_MDNULL;
- gpr_timespec token_lifetime;
+ grpc_millis token_lifetime;
grpc_httpcli_response response = http_response(200, "");
GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response(
&exec_ctx, &response, &token_md, &token_lifetime) ==
@@ -239,7 +238,7 @@ static void test_oauth2_token_fetcher_creds_parsing_empty_http_body(void) {
static void test_oauth2_token_fetcher_creds_parsing_invalid_json(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_mdelem token_md = GRPC_MDNULL;
- gpr_timespec token_lifetime;
+ grpc_millis token_lifetime;
grpc_httpcli_response response =
http_response(200,
"{\"access_token\":\"ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_\","
@@ -255,7 +254,7 @@ static void test_oauth2_token_fetcher_creds_parsing_invalid_json(void) {
static void test_oauth2_token_fetcher_creds_parsing_missing_token(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_mdelem token_md = GRPC_MDNULL;
- gpr_timespec token_lifetime;
+ grpc_millis token_lifetime;
grpc_httpcli_response response = http_response(200,
"{"
" \"expires_in\":3599, "
@@ -270,7 +269,7 @@ static void test_oauth2_token_fetcher_creds_parsing_missing_token(void) {
static void test_oauth2_token_fetcher_creds_parsing_missing_token_type(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_mdelem token_md = GRPC_MDNULL;
- gpr_timespec token_lifetime;
+ grpc_millis token_lifetime;
grpc_httpcli_response response =
http_response(200,
"{\"access_token\":\"ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_\","
@@ -287,7 +286,7 @@ static void test_oauth2_token_fetcher_creds_parsing_missing_token_lifetime(
void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_mdelem token_md = GRPC_MDNULL;
- gpr_timespec token_lifetime;
+ grpc_millis token_lifetime;
grpc_httpcli_response response =
http_response(200,
"{\"access_token\":\"ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_\","
@@ -555,7 +554,7 @@ static void validate_compute_engine_http_request(
static int compute_engine_httpcli_get_success_override(
grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_done,
+ grpc_millis deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
validate_compute_engine_http_request(request);
*response = http_response(200, valid_oauth2_json_response);
@@ -565,7 +564,7 @@ static int compute_engine_httpcli_get_success_override(
static int compute_engine_httpcli_get_failure_override(
grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_done,
+ grpc_millis deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
validate_compute_engine_http_request(request);
*response = http_response(403, "Not Authorized.");
@@ -575,7 +574,7 @@ static int compute_engine_httpcli_get_failure_override(
static int httpcli_post_should_not_be_called(
grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
- const char *body_bytes, size_t body_size, gpr_timespec deadline,
+ const char *body_bytes, size_t body_size, grpc_millis deadline,
grpc_closure *on_done, grpc_httpcli_response *response) {
GPR_ASSERT("HTTP POST should not be called" == NULL);
return 1;
@@ -583,7 +582,7 @@ static int httpcli_post_should_not_be_called(
static int httpcli_get_should_not_be_called(grpc_exec_ctx *exec_ctx,
const grpc_httpcli_request *request,
- gpr_timespec deadline,
+ grpc_millis deadline,
grpc_closure *on_done,
grpc_httpcli_response *response) {
GPR_ASSERT("HTTP GET should not be called" == NULL);
@@ -663,7 +662,7 @@ static void validate_refresh_token_http_request(
static int refresh_token_httpcli_post_success(
grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
- const char *body, size_t body_size, gpr_timespec deadline,
+ const char *body, size_t body_size, grpc_millis deadline,
grpc_closure *on_done, grpc_httpcli_response *response) {
validate_refresh_token_http_request(request, body, body_size);
*response = http_response(200, valid_oauth2_json_response);
@@ -673,7 +672,7 @@ static int refresh_token_httpcli_post_success(
static int refresh_token_httpcli_post_failure(
grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
- const char *body, size_t body_size, gpr_timespec deadline,
+ const char *body, size_t body_size, grpc_millis deadline,
grpc_closure *on_done, grpc_httpcli_response *response) {
validate_refresh_token_http_request(request, body, body_size);
*response = http_response(403, "Not Authorized.");
@@ -932,7 +931,7 @@ static void test_google_default_creds_refresh_token(void) {
static int default_creds_gce_detection_httpcli_get_success_override(
grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_done,
+ grpc_millis deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
*response = http_response(200, "");
grpc_http_header *headers = gpr_malloc(sizeof(*headers) * 1);
@@ -996,7 +995,7 @@ static void test_google_default_creds_gce(void) {
static int default_creds_gce_detection_httpcli_get_failure_override(
grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_done,
+ grpc_millis deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
/* No magic header. */
GPR_ASSERT(strcmp(request->http.path, "/") == 0);
diff --git a/test/core/security/jwt_verifier_test.c b/test/core/security/jwt_verifier_test.c
index 9b17fb516d..a4bfe0130e 100644
--- a/test/core/security/jwt_verifier_test.c
+++ b/test/core/security/jwt_verifier_test.c
@@ -324,7 +324,7 @@ static grpc_httpcli_response http_response(int status, char *body) {
static int httpcli_post_should_not_be_called(
grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
- const char *body_bytes, size_t body_size, gpr_timespec deadline,
+ const char *body_bytes, size_t body_size, grpc_millis deadline,
grpc_closure *on_done, grpc_httpcli_response *response) {
GPR_ASSERT("HTTP POST should not be called" == NULL);
return 1;
@@ -332,7 +332,7 @@ static int httpcli_post_should_not_be_called(
static int httpcli_get_google_keys_for_email(
grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_done,
+ grpc_millis deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
*response = http_response(200, good_google_email_keys());
GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
@@ -379,7 +379,7 @@ static void test_jwt_verifier_google_email_issuer_success(void) {
static int httpcli_get_custom_keys_for_email(
grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_done,
+ grpc_millis deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
*response = http_response(200, gpr_strdup(good_jwk_set));
GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
@@ -413,7 +413,7 @@ static void test_jwt_verifier_custom_email_issuer_success(void) {
static int httpcli_get_jwk_set(grpc_exec_ctx *exec_ctx,
const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_done,
+ grpc_millis deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
*response = http_response(200, gpr_strdup(good_jwk_set));
GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
@@ -425,7 +425,7 @@ static int httpcli_get_jwk_set(grpc_exec_ctx *exec_ctx,
static int httpcli_get_openid_config(grpc_exec_ctx *exec_ctx,
const grpc_httpcli_request *request,
- gpr_timespec deadline,
+ grpc_millis deadline,
grpc_closure *on_done,
grpc_httpcli_response *response) {
*response = http_response(200, gpr_strdup(good_openid_config));
@@ -471,7 +471,7 @@ static void on_verification_key_retrieval_error(grpc_exec_ctx *exec_ctx,
static int httpcli_get_bad_json(grpc_exec_ctx *exec_ctx,
const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_done,
+ grpc_millis deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
*response = http_response(200, gpr_strdup("{\"bad\": \"stuff\"}"));
GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
@@ -581,7 +581,7 @@ static void test_jwt_verifier_bad_signature(void) {
static int httpcli_get_should_not_be_called(grpc_exec_ctx *exec_ctx,
const grpc_httpcli_request *request,
- gpr_timespec deadline,
+ grpc_millis deadline,
grpc_closure *on_done,
grpc_httpcli_response *response) {
GPR_ASSERT(0);
diff --git a/test/core/security/oauth2_utils.c b/test/core/security/oauth2_utils.c
index d240403a29..73d6c5bc7d 100644
--- a/test/core/security/oauth2_utils.c
+++ b/test/core/security/oauth2_utils.c
@@ -104,8 +104,7 @@ char *grpc_test_fetch_oauth2_token_with_credentials(
"pollset_work",
grpc_pollset_work(&exec_ctx,
grpc_polling_entity_pollset(&request.pops),
- &worker, gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_inf_future(GPR_CLOCK_MONOTONIC)))) {
+ &worker, GRPC_MILLIS_INF_FUTURE))) {
request.is_done = true;
}
}
diff --git a/test/core/security/print_google_default_creds_token.c b/test/core/security/print_google_default_creds_token.c
index 3144717a85..29c38dfdf8 100644
--- a/test/core/security/print_google_default_creds_token.c
+++ b/test/core/security/print_google_default_creds_token.c
@@ -110,8 +110,7 @@ int main(int argc, char **argv) {
"pollset_work",
grpc_pollset_work(&exec_ctx,
grpc_polling_entity_pollset(&sync.pops), &worker,
- gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_inf_future(GPR_CLOCK_MONOTONIC))))
+ GRPC_MILLIS_INF_FUTURE)))
sync.is_done = true;
gpr_mu_unlock(sync.mu);
grpc_exec_ctx_flush(&exec_ctx);
diff --git a/test/core/security/ssl_server_fuzzer.c b/test/core/security/ssl_server_fuzzer.c
index 9858b11c7c..f9b754b8f2 100644
--- a/test/core/security/ssl_server_fuzzer.c
+++ b/test/core/security/ssl_server_fuzzer.c
@@ -84,8 +84,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
grpc_security_status status =
grpc_server_credentials_create_security_connector(&exec_ctx, creds, &sc);
GPR_ASSERT(status == GRPC_SECURITY_OK);
- gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_seconds(1, GPR_TIMESPAN));
+ grpc_millis deadline = GPR_MS_PER_SEC + grpc_exec_ctx_now(&exec_ctx);
struct handshake_state state;
state.done_callback_called = false;
diff --git a/test/core/security/verify_jwt.c b/test/core/security/verify_jwt.c
index 5faa6352a8..cec6fb94b4 100644
--- a/test/core/security/verify_jwt.c
+++ b/test/core/security/verify_jwt.c
@@ -102,11 +102,9 @@ int main(int argc, char **argv) {
gpr_mu_lock(sync.mu);
while (!sync.is_done) {
grpc_pollset_worker *worker = NULL;
- if (!GRPC_LOG_IF_ERROR(
- "pollset_work",
- grpc_pollset_work(&exec_ctx, sync.pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_inf_future(GPR_CLOCK_MONOTONIC))))
+ if (!GRPC_LOG_IF_ERROR("pollset_work",
+ grpc_pollset_work(&exec_ctx, sync.pollset, &worker,
+ GRPC_MILLIS_INF_FUTURE)))
sync.is_done = true;
gpr_mu_unlock(sync.mu);
grpc_exec_ctx_flush(&exec_ctx);
diff --git a/test/core/support/BUILD b/test/core/support/BUILD
index 096576e13c..407c3eff7b 100644
--- a/test/core/support/BUILD
+++ b/test/core/support/BUILD
@@ -39,16 +39,6 @@ grpc_cc_test(
)
grpc_cc_test(
- name = "backoff_test",
- srcs = ["backoff_test.c"],
- language = "C",
- deps = [
- "//:gpr",
- "//test/core/util:gpr_test_util",
- ],
-)
-
-grpc_cc_test(
name = "cmdline_test",
srcs = ["cmdline_test.c"],
language = "C",
diff --git a/test/core/support/backoff_test.c b/test/core/support/backoff_test.c
deleted file mode 100644
index 23e3005af0..0000000000
--- a/test/core/support/backoff_test.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/support/backoff.h"
-
-#include <grpc/support/log.h>
-
-#include "test/core/util/test_config.h"
-
-static void test_constant_backoff(void) {
- gpr_backoff backoff;
- gpr_backoff_init(&backoff, 200 /* initial timeout */, 1.0 /* multiplier */,
- 0.0 /* jitter */, 100 /* min timeout */,
- 1000 /* max timeout */);
-
- gpr_timespec now = gpr_time_0(GPR_TIMESPAN);
- gpr_timespec next = gpr_backoff_begin(&backoff, now);
- GPR_ASSERT(gpr_time_to_millis(gpr_time_sub(next, now)) == 200);
- for (int i = 0; i < 10000; i++) {
- next = gpr_backoff_step(&backoff, now);
- GPR_ASSERT(gpr_time_to_millis(gpr_time_sub(next, now)) == 200);
- now = next;
- }
-}
-
-static void test_min_connect(void) {
- gpr_backoff backoff;
- gpr_backoff_init(&backoff, 100 /* initial timeout */, 1.0 /* multiplier */,
- 0.0 /* jitter */, 200 /* min timeout */,
- 1000 /* max timeout */);
-
- gpr_timespec now = gpr_time_0(GPR_TIMESPAN);
- gpr_timespec next = gpr_backoff_begin(&backoff, now);
- GPR_ASSERT(gpr_time_to_millis(gpr_time_sub(next, now)) == 200);
-}
-
-static void test_no_jitter_backoff(void) {
- gpr_backoff backoff;
- gpr_backoff_init(&backoff, 2 /* initial timeout */, 2.0 /* multiplier */,
- 0.0 /* jitter */, 1 /* min timeout */,
- 513 /* max timeout */);
- // x_1 = 2
- // x_n = 2**n + x_{n-1} ( = 2**(n+1) - 2 )
- gpr_timespec now = gpr_time_0(GPR_TIMESPAN);
- gpr_timespec next = gpr_backoff_begin(&backoff, now);
- GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(2, GPR_TIMESPAN), next) == 0);
- now = next;
- next = gpr_backoff_step(&backoff, now);
- GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(6, GPR_TIMESPAN), next) == 0);
- now = next;
- next = gpr_backoff_step(&backoff, now);
- GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(14, GPR_TIMESPAN), next) == 0);
- now = next;
- next = gpr_backoff_step(&backoff, now);
- GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(30, GPR_TIMESPAN), next) == 0);
- now = next;
- next = gpr_backoff_step(&backoff, now);
- GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(62, GPR_TIMESPAN), next) == 0);
- now = next;
- next = gpr_backoff_step(&backoff, now);
- GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(126, GPR_TIMESPAN), next) == 0);
- now = next;
- next = gpr_backoff_step(&backoff, now);
- GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(254, GPR_TIMESPAN), next) == 0);
- now = next;
- next = gpr_backoff_step(&backoff, now);
- GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(510, GPR_TIMESPAN), next) == 0);
- now = next;
- next = gpr_backoff_step(&backoff, now);
- GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(1022, GPR_TIMESPAN), next) == 0);
- now = next;
- next = gpr_backoff_step(&backoff, now);
- // Hit the maximum timeout. From this point onwards, retries will increase
- // only by max timeout.
- GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(1535, GPR_TIMESPAN), next) == 0);
- now = next;
- next = gpr_backoff_step(&backoff, now);
- GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(2048, GPR_TIMESPAN), next) == 0);
- now = next;
- next = gpr_backoff_step(&backoff, now);
- GPR_ASSERT(gpr_time_cmp(gpr_time_from_millis(2561, GPR_TIMESPAN), next) == 0);
-}
-
-static void test_jitter_backoff(void) {
- const int64_t initial_timeout = 500;
- const double jitter = 0.1;
- gpr_backoff backoff;
- gpr_backoff_init(&backoff, initial_timeout, 1.0 /* multiplier */, jitter,
- 100 /* min timeout */, 1000 /* max timeout */);
-
- backoff.rng_state = 0; // force consistent PRNG
-
- gpr_timespec now = gpr_time_0(GPR_TIMESPAN);
- gpr_timespec next = gpr_backoff_begin(&backoff, now);
- GPR_ASSERT(gpr_time_to_millis(gpr_time_sub(next, now)) == 500);
-
- int64_t expected_next_lower_bound =
- (int64_t)((double)initial_timeout * (1 - jitter));
- int64_t expected_next_upper_bound =
- (int64_t)((double)initial_timeout * (1 + jitter));
-
- for (int i = 0; i < 10000; i++) {
- next = gpr_backoff_step(&backoff, now);
-
- // next-now must be within (jitter*100)% of the previous timeout.
- const int64_t timeout_millis = gpr_time_to_millis(gpr_time_sub(next, now));
- GPR_ASSERT(timeout_millis >= expected_next_lower_bound);
- GPR_ASSERT(timeout_millis <= expected_next_upper_bound);
-
- expected_next_lower_bound =
- (int64_t)((double)timeout_millis * (1 - jitter));
- expected_next_upper_bound =
- (int64_t)((double)timeout_millis * (1 + jitter));
- now = next;
- }
-}
-
-int main(int argc, char **argv) {
- grpc_test_init(argc, argv);
- gpr_time_init();
-
- test_constant_backoff();
- test_min_connect();
- test_no_jitter_backoff();
- test_jitter_backoff();
-
- return 0;
-}
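For reference, the no-jitter expectations in the deleted backoff test follow directly from its comment's recurrence: with a 2 ms initial timeout and multiplier 2.0, each gpr_backoff_step doubles the previous timeout, so the n-th absolute deadline is x_n = x_{n-1} + 2^n = 2^(n+1) - 2 ms (2, 6, 14, ..., 1022); once a step hits the 513 ms cap, each further deadline advances by exactly 513 ms (1022 + 513 = 1535, then 2048, then 2561), matching the asserted values.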
diff --git a/test/core/surface/concurrent_connectivity_test.c b/test/core/surface/concurrent_connectivity_test.c
index ec2cd8610b..3595885ff6 100644
--- a/test/core/surface/concurrent_connectivity_test.c
+++ b/test/core/surface/concurrent_connectivity_test.c
@@ -135,14 +135,12 @@ void bad_server_thread(void *vargs) {
gpr_mu_lock(args->mu);
while (gpr_atm_acq_load(&args->stop) == 0) {
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec deadline =
- gpr_time_add(now, gpr_time_from_millis(100, GPR_TIMESPAN));
+ grpc_millis deadline = grpc_exec_ctx_now(&exec_ctx) + 100;
grpc_pollset_worker *worker = NULL;
- if (!GRPC_LOG_IF_ERROR("pollset_work",
- grpc_pollset_work(&exec_ctx, args->pollset, &worker,
- now, deadline))) {
+ if (!GRPC_LOG_IF_ERROR(
+ "pollset_work",
+ grpc_pollset_work(&exec_ctx, args->pollset, &worker, deadline))) {
gpr_atm_rel_store(&args->stop, 1);
}
gpr_mu_unlock(args->mu);
diff --git a/test/core/transport/bdp_estimator_test.c b/test/core/transport/bdp_estimator_test.c
index dda48f45b1..4912ad5887 100644
--- a/test/core/transport/bdp_estimator_test.c
+++ b/test/core/transport/bdp_estimator_test.c
@@ -24,9 +24,22 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include <limits.h>
+#include "src/core/lib/iomgr/timer_manager.h"
#include "src/core/lib/support/string.h"
#include "test/core/util/test_config.h"
+extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
+
+static int g_clock = 0;
+
+static gpr_timespec fake_gpr_now(gpr_clock_type clock_type) {
+ return (gpr_timespec){
+ .tv_sec = g_clock, .tv_nsec = 0, .clock_type = clock_type,
+ };
+}
+
+static void inc_time(void) { g_clock += 30; }
+
static void test_noop(void) {
gpr_log(GPR_INFO, "test_noop");
grpc_bdp_estimator est;
@@ -44,16 +57,19 @@ static void test_get_estimate_no_samples(void) {
static void add_samples(grpc_bdp_estimator *estimator, int64_t *samples,
size_t n) {
grpc_bdp_estimator_add_incoming_bytes(estimator, 1234567);
- GPR_ASSERT(grpc_bdp_estimator_need_ping(estimator) == true);
+ inc_time();
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_ASSERT(grpc_bdp_estimator_need_ping(&exec_ctx, estimator) == true);
grpc_bdp_estimator_schedule_ping(estimator);
grpc_bdp_estimator_start_ping(estimator);
for (size_t i = 0; i < n; i++) {
grpc_bdp_estimator_add_incoming_bytes(estimator, samples[i]);
- GPR_ASSERT(grpc_bdp_estimator_need_ping(estimator) == false);
+ GPR_ASSERT(grpc_bdp_estimator_need_ping(&exec_ctx, estimator) == false);
}
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(1, GPR_TIMESPAN)));
- grpc_bdp_estimator_complete_ping(estimator);
+ grpc_bdp_estimator_complete_ping(&exec_ctx, estimator);
+ grpc_exec_ctx_finish(&exec_ctx);
}
static void add_sample(grpc_bdp_estimator *estimator, int64_t sample) {
@@ -130,7 +146,9 @@ static void test_get_estimate_random_values(size_t n) {
int main(int argc, char **argv) {
grpc_test_init(argc, argv);
+ gpr_now_impl = fake_gpr_now;
grpc_init();
+ grpc_timer_manager_set_threading(false);
test_noop();
test_get_estimate_no_samples();
test_get_estimate_1_sample();
diff --git a/test/core/transport/status_conversion_test.c b/test/core/transport/status_conversion_test.c
index 89558964c1..de8fa4458a 100644
--- a/test/core/transport/status_conversion_test.c
+++ b/test/core/transport/status_conversion_test.c
@@ -22,8 +22,13 @@
#define GRPC_STATUS_TO_HTTP2_ERROR(a, b) \
GPR_ASSERT(grpc_status_to_http2_error(a) == (b))
-#define HTTP2_ERROR_TO_GRPC_STATUS(a, deadline, b) \
- GPR_ASSERT(grpc_http2_error_to_grpc_status(a, deadline) == (b))
+#define HTTP2_ERROR_TO_GRPC_STATUS(a, deadline, b) \
+ do { \
+ grpc_exec_ctx my_exec_ctx = GRPC_EXEC_CTX_INIT; \
+ GPR_ASSERT(grpc_http2_error_to_grpc_status(&my_exec_ctx, a, deadline) == \
+ (b)); \
+ grpc_exec_ctx_finish(&my_exec_ctx); \
+ } while (0)
#define GRPC_STATUS_TO_HTTP2_STATUS(a, b) \
GPR_ASSERT(grpc_status_to_http2_status(a) == (b))
#define HTTP2_STATUS_TO_GRPC_STATUS(a, b) \
@@ -79,7 +84,7 @@ int main(int argc, char **argv) {
GRPC_STATUS_TO_HTTP2_STATUS(GRPC_STATUS_UNAVAILABLE, 200);
GRPC_STATUS_TO_HTTP2_STATUS(GRPC_STATUS_DATA_LOSS, 200);
- const gpr_timespec before_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ const grpc_millis before_deadline = GRPC_MILLIS_INF_FUTURE;
HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_NO_ERROR, before_deadline,
GRPC_STATUS_INTERNAL);
HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_PROTOCOL_ERROR, before_deadline,
@@ -107,7 +112,7 @@ int main(int argc, char **argv) {
HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_INADEQUATE_SECURITY, before_deadline,
GRPC_STATUS_PERMISSION_DENIED);
- const gpr_timespec after_deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ const grpc_millis after_deadline = 0;
HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_NO_ERROR, after_deadline,
GRPC_STATUS_INTERNAL);
HTTP2_ERROR_TO_GRPC_STATUS(GRPC_HTTP2_PROTOCOL_ERROR, after_deadline,
diff --git a/test/core/transport/timeout_encoding_test.c b/test/core/transport/timeout_encoding_test.c
index 6388ffbcec..3010c6d057 100644
--- a/test/core/transport/timeout_encoding_test.c
+++ b/test/core/transport/timeout_encoding_test.c
@@ -25,12 +25,13 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/support/murmur_hash.h"
#include "src/core/lib/support/string.h"
#include "test/core/util/test_config.h"
#define LOG_TEST(x) gpr_log(GPR_INFO, "%s", x)
-static void assert_encodes_as(gpr_timespec ts, const char *s) {
+static void assert_encodes_as(grpc_millis ts, const char *s) {
char buffer[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
grpc_http2_encode_timeout(ts, buffer);
gpr_log(GPR_INFO, "check '%s' == '%s'", buffer, s);
@@ -39,47 +40,43 @@ static void assert_encodes_as(gpr_timespec ts, const char *s) {
void test_encoding(void) {
LOG_TEST("test_encoding");
- assert_encodes_as(gpr_time_from_micros(-1, GPR_TIMESPAN), "1n");
- assert_encodes_as(gpr_time_from_seconds(-10, GPR_TIMESPAN), "1n");
- assert_encodes_as(gpr_time_from_nanos(10, GPR_TIMESPAN), "10n");
- assert_encodes_as(gpr_time_from_nanos(999999999, GPR_TIMESPAN), "1S");
- assert_encodes_as(gpr_time_from_micros(1, GPR_TIMESPAN), "1u");
- assert_encodes_as(gpr_time_from_micros(10, GPR_TIMESPAN), "10u");
- assert_encodes_as(gpr_time_from_micros(100, GPR_TIMESPAN), "100u");
- assert_encodes_as(gpr_time_from_micros(890, GPR_TIMESPAN), "890u");
- assert_encodes_as(gpr_time_from_micros(900, GPR_TIMESPAN), "900u");
- assert_encodes_as(gpr_time_from_micros(901, GPR_TIMESPAN), "901u");
- assert_encodes_as(gpr_time_from_millis(1, GPR_TIMESPAN), "1m");
- assert_encodes_as(gpr_time_from_millis(2, GPR_TIMESPAN), "2m");
- assert_encodes_as(gpr_time_from_micros(10001, GPR_TIMESPAN), "10100u");
- assert_encodes_as(gpr_time_from_micros(999999, GPR_TIMESPAN), "1S");
- assert_encodes_as(gpr_time_from_millis(1000, GPR_TIMESPAN), "1S");
- assert_encodes_as(gpr_time_from_millis(2000, GPR_TIMESPAN), "2S");
- assert_encodes_as(gpr_time_from_millis(2500, GPR_TIMESPAN), "2500m");
- assert_encodes_as(gpr_time_from_millis(59900, GPR_TIMESPAN), "59900m");
- assert_encodes_as(gpr_time_from_seconds(50, GPR_TIMESPAN), "50S");
- assert_encodes_as(gpr_time_from_seconds(59, GPR_TIMESPAN), "59S");
- assert_encodes_as(gpr_time_from_seconds(60, GPR_TIMESPAN), "1M");
- assert_encodes_as(gpr_time_from_seconds(80, GPR_TIMESPAN), "80S");
- assert_encodes_as(gpr_time_from_seconds(90, GPR_TIMESPAN), "90S");
- assert_encodes_as(gpr_time_from_minutes(2, GPR_TIMESPAN), "2M");
- assert_encodes_as(gpr_time_from_minutes(20, GPR_TIMESPAN), "20M");
- assert_encodes_as(gpr_time_from_hours(1, GPR_TIMESPAN), "1H");
- assert_encodes_as(gpr_time_from_hours(10, GPR_TIMESPAN), "10H");
- assert_encodes_as(gpr_time_from_seconds(1000000000, GPR_TIMESPAN),
- "1000000000S");
+ assert_encodes_as(-1, "1n");
+ assert_encodes_as(-10, "1n");
+ assert_encodes_as(1, "1m");
+ assert_encodes_as(10, "10m");
+ assert_encodes_as(100, "100m");
+ assert_encodes_as(890, "890m");
+ assert_encodes_as(900, "900m");
+ assert_encodes_as(901, "901m");
+ assert_encodes_as(1000, "1S");
+ assert_encodes_as(2000, "2S");
+ assert_encodes_as(2500, "2500m");
+ assert_encodes_as(59900, "59900m");
+ assert_encodes_as(50000, "50S");
+ assert_encodes_as(59000, "59S");
+ assert_encodes_as(60000, "1M");
+ assert_encodes_as(80000, "80S");
+ assert_encodes_as(90000, "90S");
+ assert_encodes_as(120000, "2M");
+ assert_encodes_as(20 * 60 * GPR_MS_PER_SEC, "20M");
+ assert_encodes_as(60 * 60 * GPR_MS_PER_SEC, "1H");
+ assert_encodes_as(10 * 60 * 60 * GPR_MS_PER_SEC, "10H");
}
-static void assert_decodes_as(const char *buffer, gpr_timespec expected) {
- gpr_timespec got;
- gpr_log(GPR_INFO, "check decoding '%s'", buffer);
+static void assert_decodes_as(const char *buffer, grpc_millis expected) {
+ grpc_millis got;
+ uint32_t hash = gpr_murmur_hash3(buffer, strlen(buffer), 0);
+ gpr_log(GPR_INFO, "check decoding '%s' (hash=0x%x)", buffer, hash);
GPR_ASSERT(1 == grpc_http2_decode_timeout(
grpc_slice_from_static_string(buffer), &got));
- GPR_ASSERT(0 == gpr_time_cmp(got, expected));
+ if (got != expected) {
+ gpr_log(GPR_ERROR, "got:'%" PRIdPTR "' != expected:'%" PRIdPTR "'", got,
+ expected);
+ abort();
+ }
}
-void decode_suite(char ext,
- gpr_timespec (*answer)(int64_t x, gpr_clock_type clock)) {
+void decode_suite(char ext, grpc_millis (*answer)(int64_t x)) {
long test_vals[] = {1, 12, 123, 1234, 12345, 123456,
1234567, 12345678, 123456789, 98765432, 9876543, 987654,
98765, 9876, 987, 98, 9};
@@ -87,41 +84,55 @@ void decode_suite(char ext,
char *input;
for (i = 0; i < GPR_ARRAY_SIZE(test_vals); i++) {
gpr_asprintf(&input, "%ld%c", test_vals[i], ext);
- assert_decodes_as(input, answer(test_vals[i], GPR_TIMESPAN));
+ assert_decodes_as(input, answer(test_vals[i]));
gpr_free(input);
gpr_asprintf(&input, " %ld%c", test_vals[i], ext);
- assert_decodes_as(input, answer(test_vals[i], GPR_TIMESPAN));
+ assert_decodes_as(input, answer(test_vals[i]));
gpr_free(input);
gpr_asprintf(&input, "%ld %c", test_vals[i], ext);
- assert_decodes_as(input, answer(test_vals[i], GPR_TIMESPAN));
+ assert_decodes_as(input, answer(test_vals[i]));
gpr_free(input);
gpr_asprintf(&input, "%ld %c ", test_vals[i], ext);
- assert_decodes_as(input, answer(test_vals[i], GPR_TIMESPAN));
+ assert_decodes_as(input, answer(test_vals[i]));
gpr_free(input);
}
}
+static grpc_millis millis_from_nanos(int64_t x) {
+ return x / GPR_NS_PER_MS + (x % GPR_NS_PER_MS != 0);
+}
+static grpc_millis millis_from_micros(int64_t x) {
+ return x / GPR_US_PER_MS + (x % GPR_US_PER_MS != 0);
+}
+static grpc_millis millis_from_millis(int64_t x) { return x; }
+static grpc_millis millis_from_seconds(int64_t x) { return x * GPR_MS_PER_SEC; }
+static grpc_millis millis_from_minutes(int64_t x) {
+ return x * 60 * GPR_MS_PER_SEC;
+}
+static grpc_millis millis_from_hours(int64_t x) {
+ return x * 3600 * GPR_MS_PER_SEC;
+}
+
void test_decoding(void) {
LOG_TEST("test_decoding");
- decode_suite('n', gpr_time_from_nanos);
- decode_suite('u', gpr_time_from_micros);
- decode_suite('m', gpr_time_from_millis);
- decode_suite('S', gpr_time_from_seconds);
- decode_suite('M', gpr_time_from_minutes);
- decode_suite('H', gpr_time_from_hours);
- assert_decodes_as("1000000000S",
- gpr_time_from_seconds(1000 * 1000 * 1000, GPR_TIMESPAN));
- assert_decodes_as("1000000000000000000000u", gpr_inf_future(GPR_TIMESPAN));
- assert_decodes_as("1000000001S", gpr_inf_future(GPR_TIMESPAN));
- assert_decodes_as("2000000001S", gpr_inf_future(GPR_TIMESPAN));
- assert_decodes_as("9999999999S", gpr_inf_future(GPR_TIMESPAN));
+ decode_suite('n', millis_from_nanos);
+ decode_suite('u', millis_from_micros);
+ decode_suite('m', millis_from_millis);
+ decode_suite('S', millis_from_seconds);
+ decode_suite('M', millis_from_minutes);
+ decode_suite('H', millis_from_hours);
+ assert_decodes_as("1000000000S", millis_from_seconds(1000 * 1000 * 1000));
+ assert_decodes_as("1000000000000000000000u", GRPC_MILLIS_INF_FUTURE);
+ assert_decodes_as("1000000001S", GRPC_MILLIS_INF_FUTURE);
+ assert_decodes_as("2000000001S", GRPC_MILLIS_INF_FUTURE);
+ assert_decodes_as("9999999999S", GRPC_MILLIS_INF_FUTURE);
}
static void assert_decoding_fails(const char *s) {
- gpr_timespec x;
+ grpc_millis x;
GPR_ASSERT(0 ==
grpc_http2_decode_timeout(grpc_slice_from_static_string(s), &x));
}
diff --git a/test/core/util/port_server_client.c b/test/core/util/port_server_client.c
index ba4028dbee..7b94ac4ada 100644
--- a/test/core/util/port_server_client.c
+++ b/test/core/util/port_server_client.c
@@ -88,7 +88,7 @@ void grpc_free_port_using_server(int port) {
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("port_server_client/free");
grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req,
- grpc_timeout_seconds_to_deadline(30),
+ grpc_exec_ctx_now(&exec_ctx) + 30 * GPR_MS_PER_SEC,
GRPC_CLOSURE_CREATE(freed_port_from_server, &pr,
grpc_schedule_on_exec_ctx),
&rsp);
@@ -100,8 +100,8 @@ void grpc_free_port_using_server(int port) {
if (!GRPC_LOG_IF_ERROR(
"pollset_work",
grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&pr.pops),
- &worker, gpr_now(GPR_CLOCK_MONOTONIC),
- grpc_timeout_seconds_to_deadline(1)))) {
+ &worker,
+ grpc_exec_ctx_now(&exec_ctx) + GPR_MS_PER_SEC))) {
pr.done = 1;
}
}
@@ -173,7 +173,7 @@ static void got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("port_server_client/pick_retry");
grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pops, resource_quota, &req,
- grpc_timeout_seconds_to_deadline(10),
+ grpc_exec_ctx_now(exec_ctx) + 30 * GPR_MS_PER_SEC,
GRPC_CLOSURE_CREATE(got_port_from_server, pr,
grpc_schedule_on_exec_ctx),
&pr->response);
@@ -224,7 +224,7 @@ int grpc_pick_port_using_server(void) {
grpc_resource_quota_create("port_server_client/pick");
grpc_httpcli_get(
&exec_ctx, &context, &pr.pops, resource_quota, &req,
- grpc_timeout_seconds_to_deadline(30),
+ grpc_exec_ctx_now(&exec_ctx) + 30 * GPR_MS_PER_SEC,
GRPC_CLOSURE_CREATE(got_port_from_server, &pr, grpc_schedule_on_exec_ctx),
&pr.response);
grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
@@ -235,8 +235,8 @@ int grpc_pick_port_using_server(void) {
if (!GRPC_LOG_IF_ERROR(
"pollset_work",
grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&pr.pops),
- &worker, gpr_now(GPR_CLOCK_MONOTONIC),
- grpc_timeout_seconds_to_deadline(1)))) {
+ &worker,
+ grpc_exec_ctx_now(&exec_ctx) + GPR_MS_PER_SEC))) {
pr.port = 0;
}
}
diff --git a/test/core/util/test_tcp_server.c b/test/core/util/test_tcp_server.c
index d3a1de8a3b..611ecb330c 100644
--- a/test/core/util/test_tcp_server.c
+++ b/test/core/util/test_tcp_server.c
@@ -31,6 +31,7 @@
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/tcp_server.h"
#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
static void on_server_destroyed(grpc_exec_ctx *exec_ctx, void *data,
grpc_error *error) {
@@ -78,14 +79,13 @@ void test_tcp_server_start(test_tcp_server *server, int port) {
void test_tcp_server_poll(test_tcp_server *server, int seconds) {
grpc_pollset_worker *worker = NULL;
- gpr_timespec deadline =
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_seconds(seconds, GPR_TIMESPAN));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_millis deadline = grpc_timespec_to_millis_round_up(
+ grpc_timeout_seconds_to_deadline(seconds));
gpr_mu_lock(server->mu);
- GRPC_LOG_IF_ERROR("pollset_work",
- grpc_pollset_work(&exec_ctx, server->pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC), deadline));
+ GRPC_LOG_IF_ERROR(
+ "pollset_work",
+ grpc_pollset_work(&exec_ctx, server->pollset, &worker, deadline));
gpr_mu_unlock(server->mu);
grpc_exec_ctx_finish(&exec_ctx);
}
diff --git a/test/cpp/common/alarm_cpp_test.cc b/test/cpp/common/alarm_cpp_test.cc
index 212972d25d..7adc3102f4 100644
--- a/test/cpp/common/alarm_cpp_test.cc
+++ b/test/cpp/common/alarm_cpp_test.cc
@@ -142,7 +142,7 @@ TEST(AlarmTest, ZeroExpiry) {
void* output_tag;
bool ok;
const CompletionQueue::NextStatus status = cq.AsyncNext(
- (void**)&output_tag, &ok, grpc_timeout_seconds_to_deadline(0));
+ (void**)&output_tag, &ok, grpc_timeout_seconds_to_deadline(1));
EXPECT_EQ(status, CompletionQueue::GOT_EVENT);
EXPECT_TRUE(ok);
@@ -158,7 +158,7 @@ TEST(AlarmTest, NegativeExpiry) {
void* output_tag;
bool ok;
const CompletionQueue::NextStatus status = cq.AsyncNext(
- (void**)&output_tag, &ok, grpc_timeout_seconds_to_deadline(0));
+ (void**)&output_tag, &ok, grpc_timeout_seconds_to_deadline(1));
EXPECT_EQ(status, CompletionQueue::GOT_EVENT);
EXPECT_TRUE(ok);
diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc
index bbefbac4d0..f938aea40e 100644
--- a/test/cpp/end2end/async_end2end_test.cc
+++ b/test/cpp/end2end/async_end2end_test.cc
@@ -105,6 +105,13 @@ class Verifier {
expectations_[tag(i)] = expect_ok;
return *this;
}
+ // AcceptOnce sets the expected ok value for a specific tag, but does not
+ // require the tag to appear. If it does appear, *seen is set to true.
+ Verifier& AcceptOnce(int i, bool expect_ok, bool* seen) {
+ maybe_expectations_[tag(i)] = MaybeExpect{expect_ok, seen};
+ return *this;
+ }
// Next waits for 1 async tag to complete, checks its
// expectations, and returns the tag
@@ -122,12 +129,7 @@ class Verifier {
} else {
EXPECT_TRUE(cq->Next(&got_tag, &ok));
}
- auto it = expectations_.find(got_tag);
- EXPECT_TRUE(it != expectations_.end());
- if (!ignore_ok) {
- EXPECT_EQ(it->second, ok);
- }
- expectations_.erase(it);
+ GotTag(got_tag, ok, ignore_ok);
return detag(got_tag);
}
@@ -138,7 +140,7 @@ class Verifier {
// This version of Verify allows optionally ignoring the
// outcome of the expectation
void Verify(CompletionQueue* cq, bool ignore_ok) {
- GPR_ASSERT(!expectations_.empty());
+ GPR_ASSERT(!expectations_.empty() || !maybe_expectations_.empty());
while (!expectations_.empty()) {
Next(cq, ignore_ok);
}
@@ -177,16 +179,43 @@ class Verifier {
EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline),
CompletionQueue::GOT_EVENT);
}
- auto it = expectations_.find(got_tag);
- EXPECT_TRUE(it != expectations_.end());
- EXPECT_EQ(it->second, ok);
- expectations_.erase(it);
+ GotTag(got_tag, ok, false);
}
}
}
private:
+ void GotTag(void* got_tag, bool ok, bool ignore_ok) {
+ auto it = expectations_.find(got_tag);
+ if (it != expectations_.end()) {
+ if (!ignore_ok) {
+ EXPECT_EQ(it->second, ok);
+ }
+ expectations_.erase(it);
+ } else {
+ auto it2 = maybe_expectations_.find(got_tag);
+ if (it2 != maybe_expectations_.end()) {
+ if (it2->second.seen != nullptr) {
+ EXPECT_FALSE(*it2->second.seen);
+ *it2->second.seen = true;
+ }
+ if (!ignore_ok) {
+ EXPECT_EQ(it2->second.ok, ok);
+ }
+ } else {
+ gpr_log(GPR_ERROR, "Unexpected tag: %p", got_tag);
+ abort();
+ }
+ }
+ }
+
+ struct MaybeExpect {
+ bool ok;
+ bool* seen;
+ };
+
std::map<void*, bool> expectations_;
+ std::map<void*, MaybeExpect> maybe_expectations_;
bool spin_;
};
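The AcceptOnce/GotTag machinery above lets a test register a tag that may legitimately complete in either of two Verify() passes while still requiring it to appear exactly once overall. A condensed usage sketch, mirroring the streaming-test hunks later in this file (all identifiers are the ones those tests already use):

  bool seen3 = false;
  // First pass: tag 2 must complete now; tag 3 may or may not arrive yet.
  Verifier(GetParam().disable_blocking)
      .Expect(2, true)
      .AcceptOnce(3, true, &seen3)
      .Verify(cq_.get());
  // Second pass: tag 4 must complete; tag 3 is still accepted if unseen so far.
  Verifier(GetParam().disable_blocking)
      .AcceptOnce(3, true, &seen3)
      .Expect(4, true)
      .Verify(cq_.get());
  // By the end of the test the optional tag must have been delivered once.
  EXPECT_TRUE(seen3);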
@@ -534,18 +563,21 @@ TEST_P(AsyncEnd2endTest, SimpleClientStreamingWithCoalescingApi) {
service_->RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
tag(2));
- auto verif = Verifier(GetParam().disable_blocking);
- verif.Expect(2, true);
-
cli_stream->Write(send_request, tag(3));
- verif.Expect(3, true);
- // Drain tag 2, optional to get tag 3 now
- while (verif.Next(cq_.get(), false) != 2) {
- }
+ bool seen3 = false;
+
+ Verifier(GetParam().disable_blocking)
+ .Expect(2, true)
+ .AcceptOnce(3, true, &seen3)
+ .Verify(cq_.get());
srv_stream.Read(&recv_request, tag(4));
- verif.Expect(4, true).Verify(cq_.get());
+
+ Verifier(GetParam().disable_blocking)
+ .AcceptOnce(3, true, &seen3)
+ .Expect(4, true)
+ .Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
@@ -570,6 +602,7 @@ TEST_P(AsyncEnd2endTest, SimpleClientStreamingWithCoalescingApi) {
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.ok());
+ EXPECT_TRUE(seen3);
}
// One ping, two pongs.
@@ -814,19 +847,21 @@ TEST_P(AsyncEnd2endTest, SimpleBidiStreamingWithCoalescingApiWAF) {
service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
tag(2));
- auto verif = Verifier(GetParam().disable_blocking);
- verif.Expect(2, true);
-
cli_stream->WriteLast(send_request, WriteOptions(), tag(3));
- verif.Expect(3, true);
- // Drain tag 2, optional to get tag 3 now
- while (verif.Next(cq_.get(), false) != 2) {
- }
+ bool seen3 = false;
+
+ Verifier(GetParam().disable_blocking)
+ .Expect(2, true)
+ .AcceptOnce(3, true, &seen3)
+ .Verify(cq_.get());
srv_stream.Read(&recv_request, tag(4));
- verif.Expect(4, true).Verify(cq_.get());
+ Verifier(GetParam().disable_blocking)
+ .AcceptOnce(3, true, &seen3)
+ .Expect(4, true)
+ .Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
srv_stream.Read(&recv_request, tag(5));
@@ -845,6 +880,7 @@ TEST_P(AsyncEnd2endTest, SimpleBidiStreamingWithCoalescingApiWAF) {
Verifier(GetParam().disable_blocking).Expect(8, true).Verify(cq_.get());
EXPECT_TRUE(recv_status.ok());
+ EXPECT_TRUE(seen3);
}
// One ping, one pong. Using server:WriteLast api
@@ -868,19 +904,21 @@ TEST_P(AsyncEnd2endTest, SimpleBidiStreamingWithCoalescingApiWL) {
service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
tag(2));
- auto verif = Verifier(GetParam().disable_blocking);
- verif.Expect(2, true);
-
cli_stream->WriteLast(send_request, WriteOptions(), tag(3));
- verif.Expect(3, true);
- // Drain tag 2, optional to get tag 3 now
- while (verif.Next(cq_.get(), false) != 2) {
- }
+ bool seen3 = false;
+
+ Verifier(GetParam().disable_blocking)
+ .Expect(2, true)
+ .AcceptOnce(3, true, &seen3)
+ .Verify(cq_.get());
srv_stream.Read(&recv_request, tag(4));
- verif.Expect(4, true).Verify(cq_.get());
+ Verifier(GetParam().disable_blocking)
+ .AcceptOnce(3, true, &seen3)
+ .Expect(4, true)
+ .Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
srv_stream.Read(&recv_request, tag(5));
@@ -901,6 +939,7 @@ TEST_P(AsyncEnd2endTest, SimpleBidiStreamingWithCoalescingApiWL) {
Verifier(GetParam().disable_blocking).Expect(9, true).Verify(cq_.get());
EXPECT_TRUE(recv_status.ok());
+ EXPECT_TRUE(seen3);
}
// Metadata tests
diff --git a/test/cpp/end2end/generic_end2end_test.cc b/test/cpp/end2end/generic_end2end_test.cc
index 33b35108d2..9450182302 100644
--- a/test/cpp/end2end/generic_end2end_test.cc
+++ b/test/cpp/end2end/generic_end2end_test.cc
@@ -145,7 +145,7 @@ class GenericEnd2endTest : public ::testing::Test {
if (check_deadline) {
EXPECT_TRUE(gpr_time_similar(deadline, srv_ctx.raw_deadline(),
- gpr_time_from_millis(100, GPR_TIMESPAN)));
+ gpr_time_from_millis(1000, GPR_TIMESPAN)));
}
ByteBuffer recv_buffer;
diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc
index cadc9b2a11..cf9a42e8c6 100644
--- a/test/cpp/microbenchmarks/bm_call_create.cc
+++ b/test/cpp/microbenchmarks/bm_call_create.cc
@@ -554,7 +554,7 @@ static void BM_IsolatedFilter(benchmark::State &state) {
grpc_exec_ctx_flush(&exec_ctx);
grpc_call_stack *call_stack = static_cast<grpc_call_stack *>(
gpr_zalloc(channel_stack->call_stack_size));
- gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ grpc_millis deadline = GRPC_MILLIS_INF_FUTURE;
gpr_timespec start_time = gpr_now(GPR_CLOCK_MONOTONIC);
grpc_slice method = grpc_slice_from_static_string("/foo/bar");
grpc_call_final_info final_info;
diff --git a/test/cpp/microbenchmarks/bm_chttp2_transport.cc b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
index 070034fe33..6f9dee7822 100644
--- a/test/cpp/microbenchmarks/bm_chttp2_transport.cc
+++ b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
@@ -321,7 +321,7 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) {
grpc_metadata_batch b;
grpc_metadata_batch_init(&b);
- b.deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ b.deadline = GRPC_MILLIS_INF_FUTURE;
std::vector<grpc_mdelem> elems = Metadata::GetElems(f.exec_ctx());
std::vector<grpc_linked_mdelem> storage(elems.size());
for (size_t i = 0; i < elems.size(); i++) {
@@ -410,7 +410,7 @@ static void BM_TransportStreamSend(benchmark::State &state) {
grpc_metadata_batch b;
grpc_metadata_batch_init(&b);
- b.deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ b.deadline = GRPC_MILLIS_INF_FUTURE;
std::vector<grpc_mdelem> elems =
RepresentativeClientInitialMetadata::GetElems(f.exec_ctx());
std::vector<grpc_linked_mdelem> storage(elems.size());
@@ -542,7 +542,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
grpc_metadata_batch_init(&b);
grpc_metadata_batch b_recv;
grpc_metadata_batch_init(&b_recv);
- b.deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ b.deadline = GRPC_MILLIS_INF_FUTURE;
std::vector<grpc_mdelem> elems =
RepresentativeClientInitialMetadata::GetElems(f.exec_ctx());
std::vector<grpc_linked_mdelem> storage(elems.size());
diff --git a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
index 5c9405f583..57a69acf01 100644
--- a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
+++ b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
@@ -73,9 +73,9 @@ static void cq_done_cb(grpc_exec_ctx* exec_ctx, void* done_arg,
/* Queues a completion tag if deadline is > 0.
* Does nothing if deadline is 0 (i.e gpr_time_0(GPR_CLOCK_MONOTONIC)) */
static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
- grpc_pollset_worker** worker, gpr_timespec now,
- gpr_timespec deadline) {
- if (gpr_time_cmp(deadline, gpr_time_0(GPR_CLOCK_MONOTONIC)) == 0) {
+ grpc_pollset_worker** worker,
+ grpc_millis deadline) {
+ if (deadline == 0) {
gpr_log(GPR_DEBUG, "no-op");
return GRPC_ERROR_NONE;
}
diff --git a/test/cpp/microbenchmarks/bm_error.cc b/test/cpp/microbenchmarks/bm_error.cc
index bd5f02e172..56b80dfcf6 100644
--- a/test/cpp/microbenchmarks/bm_error.cc
+++ b/test/cpp/microbenchmarks/bm_error.cc
@@ -159,39 +159,39 @@ BENCHMARK(BM_ErrorGetPresentInt);
// Fixtures for tests: generate different kinds of errors
class ErrorNone {
public:
- gpr_timespec deadline() const { return deadline_; }
+ grpc_millis deadline() const { return deadline_; }
grpc_error* error() const { return GRPC_ERROR_NONE; }
private:
- const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ const grpc_millis deadline_ = GRPC_MILLIS_INF_FUTURE;
};
class ErrorCancelled {
public:
- gpr_timespec deadline() const { return deadline_; }
+ grpc_millis deadline() const { return deadline_; }
grpc_error* error() const { return GRPC_ERROR_CANCELLED; }
private:
- const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ const grpc_millis deadline_ = GRPC_MILLIS_INF_FUTURE;
};
class SimpleError {
public:
- gpr_timespec deadline() const { return deadline_; }
+ grpc_millis deadline() const { return deadline_; }
grpc_error* error() const { return error_.get(); }
private:
- const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ const grpc_millis deadline_ = GRPC_MILLIS_INF_FUTURE;
ErrorPtr error_{GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error")};
};
class ErrorWithGrpcStatus {
public:
- gpr_timespec deadline() const { return deadline_; }
+ grpc_millis deadline() const { return deadline_; }
grpc_error* error() const { return error_.get(); }
private:
- const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ const grpc_millis deadline_ = GRPC_MILLIS_INF_FUTURE;
ErrorPtr error_{grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error"), GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNIMPLEMENTED)};
@@ -199,11 +199,11 @@ class ErrorWithGrpcStatus {
class ErrorWithHttpError {
public:
- gpr_timespec deadline() const { return deadline_; }
+ grpc_millis deadline() const { return deadline_; }
grpc_error* error() const { return error_.get(); }
private:
- const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ const grpc_millis deadline_ = GRPC_MILLIS_INF_FUTURE;
ErrorPtr error_{grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error"), GRPC_ERROR_INT_HTTP2_ERROR,
GRPC_HTTP2_COMPRESSION_ERROR)};
@@ -211,11 +211,11 @@ class ErrorWithHttpError {
class ErrorWithNestedGrpcStatus {
public:
- gpr_timespec deadline() const { return deadline_; }
+ grpc_millis deadline() const { return deadline_; }
grpc_error* error() const { return error_.get(); }
private:
- const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ const grpc_millis deadline_ = GRPC_MILLIS_INF_FUTURE;
ErrorPtr nested_error_{grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error"), GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNIMPLEMENTED)};
@@ -248,12 +248,14 @@ template <class Fixture>
static void BM_ErrorGetStatus(benchmark::State& state) {
TrackCounters track_counters;
Fixture fixture;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
grpc_status_code status;
grpc_slice slice;
- grpc_error_get_status(fixture.error(), fixture.deadline(), &status, &slice,
- NULL);
+ grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(),
+ &status, &slice, NULL);
}
+ grpc_exec_ctx_finish(&exec_ctx);
track_counters.Finish(state);
}
@@ -261,11 +263,13 @@ template <class Fixture>
static void BM_ErrorGetStatusCode(benchmark::State& state) {
TrackCounters track_counters;
Fixture fixture;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
grpc_status_code status;
- grpc_error_get_status(fixture.error(), fixture.deadline(), &status, NULL,
- NULL);
+ grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(),
+ &status, NULL, NULL);
}
+ grpc_exec_ctx_finish(&exec_ctx);
track_counters.Finish(state);
}
@@ -273,11 +277,13 @@ template <class Fixture>
static void BM_ErrorHttpError(benchmark::State& state) {
TrackCounters track_counters;
Fixture fixture;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (state.KeepRunning()) {
grpc_http2_error_code error;
- grpc_error_get_status(fixture.error(), fixture.deadline(), NULL, NULL,
- &error);
+ grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(), NULL,
+ NULL, &error);
}
+ grpc_exec_ctx_finish(&exec_ctx);
track_counters.Finish(state);
}
diff --git a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
index 2656566a50..adb5e6657f 100644
--- a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
+++ b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
@@ -29,6 +29,7 @@
extern "C" {
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
+#include "src/core/lib/iomgr/timer_manager.h"
#include "test/core/util/trickle_endpoint.h"
}
@@ -45,6 +46,22 @@ DEFINE_int32(warmup_max_time_seconds, 10,
namespace grpc {
namespace testing {
+gpr_atm g_now_us = 0;
+
+static gpr_timespec fake_now(gpr_clock_type clock_type) {
+ gpr_timespec t;
+ gpr_atm now = gpr_atm_no_barrier_load(&g_now_us);
+ t.tv_sec = now / GPR_US_PER_SEC;
+ t.tv_nsec = (now % GPR_US_PER_SEC) * GPR_NS_PER_US;
+ t.clock_type = clock_type;
+ return t;
+}
+
+static void inc_time() {
+ gpr_atm_no_barrier_fetch_add(&g_now_us, 100);
+ grpc_timer_manager_tick();
+}
+
static void* tag(intptr_t x) { return reinterpret_cast<void*>(x); }
template <class A0>
@@ -158,6 +175,7 @@ class TrickledCHTTP2 : public EndpointPairFixture {
void Step(bool update_stats) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ inc_time();
size_t client_backlog =
grpc_trickle_endpoint_trickle(&exec_ctx, endpoint_pair_.client);
size_t server_backlog =
@@ -212,9 +230,8 @@ static void TrickleCQNext(TrickledCHTTP2* fixture, void** t, bool* ok,
int64_t iteration) {
while (true) {
fixture->Log(iteration);
- switch (fixture->cq()->AsyncNext(
- t, ok, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_micros(100, GPR_TIMESPAN)))) {
+ switch (
+ fixture->cq()->AsyncNext(t, ok, gpr_inf_past(GPR_CLOCK_MONOTONIC))) {
case CompletionQueue::TIMEOUT:
fixture->Step(iteration != -1);
break;
@@ -289,9 +306,15 @@ static void BM_PumpStreamServerToClient_Trickle(benchmark::State& state) {
inner_loop(false);
}
response_rw.Finish(Status::OK, tag(1));
- need_tags = (1 << 0) | (1 << 1);
+ grpc::Status status;
+ request_rw->Finish(&status, tag(2));
+ need_tags = (1 << 0) | (1 << 1) | (1 << 2);
while (need_tags) {
TrickleCQNext(fixture.get(), &t, &ok, -1);
+ if (t == tag(0) && ok) {
+ request_rw->Read(&recv_response, tag(0));
+ continue;
+ }
int i = (int)(intptr_t)t;
GPR_ASSERT(need_tags & (1 << i));
need_tags &= ~(1 << i);
@@ -419,8 +442,12 @@ BENCHMARK(BM_PumpUnbalancedUnary_Trickle)->Apply(UnaryTrickleArgs);
}
}
+extern "C" gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
+
int main(int argc, char** argv) {
::benchmark::Initialize(&argc, argv);
::grpc::testing::InitTest(&argc, &argv, false);
+ grpc_timer_manager_set_threading(false);
+ gpr_now_impl = ::grpc::testing::fake_now;
::benchmark::RunSpecifiedBenchmarks();
}
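The bm_fullstack_trickle changes above replace wall-clock time with a manually advanced clock, so the benchmark decides exactly when deadlines and timers fire. Condensed from the hunk above (the 100-microsecond step is the value chosen there):

  // In main(), before running benchmarks: disable the background timer thread
  // and point gpr_now_impl (the hook gpr_now() dispatches through) at the fake
  // clock that reads the g_now_us atomic.
  grpc_timer_manager_set_threading(false);
  gpr_now_impl = ::grpc::testing::fake_now;

  // Per simulated step (what inc_time() does): advance the microsecond counter
  // and run any timers that have become due at the new fake time.
  gpr_atm_no_barrier_fetch_add(&g_now_us, 100);
  grpc_timer_manager_tick();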
diff --git a/test/cpp/microbenchmarks/bm_pollset.cc b/test/cpp/microbenchmarks/bm_pollset.cc
index 1fc1f2f83b..eab1e89480 100644
--- a/test/cpp/microbenchmarks/bm_pollset.cc
+++ b/test/cpp/microbenchmarks/bm_pollset.cc
@@ -117,11 +117,9 @@ static void BM_PollEmptyPollset(benchmark::State& state) {
gpr_mu* mu;
grpc_pollset_init(ps, &mu);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- gpr_timespec now = gpr_time_0(GPR_CLOCK_MONOTONIC);
- gpr_timespec deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
gpr_mu_lock(mu);
while (state.KeepRunning()) {
- GRPC_ERROR_UNREF(grpc_pollset_work(&exec_ctx, ps, NULL, now, deadline));
+ GRPC_ERROR_UNREF(grpc_pollset_work(&exec_ctx, ps, NULL, 0));
}
grpc_closure shutdown_ps_closure;
GRPC_CLOSURE_INIT(&shutdown_ps_closure, shutdown_ps, ps,
@@ -223,8 +221,6 @@ static void BM_SingleThreadPollOneFd(benchmark::State& state) {
gpr_mu* mu;
grpc_pollset_init(ps, &mu);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- gpr_timespec now = gpr_time_0(GPR_CLOCK_MONOTONIC);
- gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
grpc_wakeup_fd wakeup_fd;
GRPC_ERROR_UNREF(grpc_wakeup_fd_init(&wakeup_fd));
grpc_fd* wakeup = grpc_fd_create(wakeup_fd.read_fd, "wakeup_read");
@@ -245,7 +241,8 @@ static void BM_SingleThreadPollOneFd(benchmark::State& state) {
grpc_fd_notify_on_read(&exec_ctx, wakeup, continue_closure);
gpr_mu_lock(mu);
while (!done) {
- GRPC_ERROR_UNREF(grpc_pollset_work(&exec_ctx, ps, NULL, now, deadline));
+ GRPC_ERROR_UNREF(
+ grpc_pollset_work(&exec_ctx, ps, NULL, GRPC_MILLIS_INF_FUTURE));
}
grpc_fd_orphan(&exec_ctx, wakeup, NULL, NULL, false /* already_closed */,
"done");
diff --git a/test/cpp/microbenchmarks/fullstack_fixtures.h b/test/cpp/microbenchmarks/fullstack_fixtures.h
index ecd28c3f8a..a7f8504505 100644
--- a/test/cpp/microbenchmarks/fullstack_fixtures.h
+++ b/test/cpp/microbenchmarks/fullstack_fixtures.h
@@ -85,7 +85,7 @@ class FullstackFixture : public BaseFixture {
}
virtual ~FullstackFixture() {
- server_->Shutdown();
+ server_->Shutdown(gpr_inf_past(GPR_CLOCK_MONOTONIC));
cq_->Shutdown();
void* tag;
bool ok;
@@ -212,7 +212,7 @@ class EndpointPairFixture : public BaseFixture {
}
virtual ~EndpointPairFixture() {
- server_->Shutdown();
+ server_->Shutdown(gpr_inf_past(GPR_CLOCK_MONOTONIC));
cq_->Shutdown();
void* tag;
bool ok;
diff --git a/test/cpp/naming/resolver_component_test.cc b/test/cpp/naming/resolver_component_test.cc
index cc851ca9d5..7d0371bea4 100644
--- a/test/cpp/naming/resolver_component_test.cc
+++ b/test/cpp/naming/resolver_component_test.cc
@@ -199,10 +199,10 @@ void PollPollsetUntilRequestDone(ArgsStruct *args) {
grpc_pollset_worker *worker = NULL;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(args->mu);
- GRPC_LOG_IF_ERROR(
- "pollset_work",
- grpc_pollset_work(&exec_ctx, args->pollset, &worker,
- gpr_now(GPR_CLOCK_REALTIME), NSecondDeadline(1)));
+ GRPC_LOG_IF_ERROR("pollset_work",
+ grpc_pollset_work(&exec_ctx, args->pollset, &worker,
+ grpc_timespec_to_millis_round_up(
+ NSecondDeadline(1))));
gpr_mu_unlock(args->mu);
grpc_exec_ctx_finish(&exec_ctx);
}
diff --git a/tools/debug/core/chttp2_ref_leak.py b/tools/debug/core/chttp2_ref_leak.py
new file mode 100755
index 0000000000..d693dd9e54
--- /dev/null
+++ b/tools/debug/core/chttp2_ref_leak.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python2.7
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Reads stdin to find chttp2_refcount log lines, and prints reference leaks
+# to stdout
+
+import collections
+import sys
+import re
+
+def new_obj():
+ return ['destroy']
+
+outstanding = collections.defaultdict(new_obj)
+
+# Sample log line:
+# chttp2:unref:0x629000005200 2->1 destroy [src/core/ext/transport/chttp2/transport/chttp2_transport.c:599]
+
+for line in sys.stdin:
+ m = re.search(r'chttp2:( ref|unref):0x([a-fA-F0-9]+) [^ ]+ ([^[]+) \[(.*)\]', line)
+ if m:
+ if m.group(1) == ' ref':
+ outstanding[m.group(2)].append(m.group(3))
+ else:
+ outstanding[m.group(2)].remove(m.group(3))
+
+for obj, remaining in outstanding.items():
+ if remaining:
+ print 'LEAKED: %s %r' % (obj, remaining)
+
diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal
index 584dd0af57..0f7e8cd3a8 100644
--- a/tools/doxygen/Doxyfile.c++.internal
+++ b/tools/doxygen/Doxyfile.c++.internal
@@ -933,6 +933,7 @@ include/grpc/support/tls_pthread.h \
include/grpc/support/useful.h \
include/grpc/support/workaround_list.h \
src/core/ext/transport/inproc/inproc_transport.h \
+src/core/lib/backoff/backoff.h \
src/core/lib/channel/channel_args.h \
src/core/lib/channel/channel_stack.h \
src/core/lib/channel/channel_stack_builder.h \
@@ -952,6 +953,7 @@ src/core/lib/debug/trace.h \
src/core/lib/http/format_request.h \
src/core/lib/http/httpcli.h \
src/core/lib/http/parser.h \
+src/core/lib/iomgr/block_annotate.h \
src/core/lib/iomgr/call_combiner.h \
src/core/lib/iomgr/closure.h \
src/core/lib/iomgr/combiner.h \
@@ -1028,8 +1030,6 @@ src/core/lib/support/arena.h \
src/core/lib/support/atomic.h \
src/core/lib/support/atomic_with_atm.h \
src/core/lib/support/atomic_with_std.h \
-src/core/lib/support/backoff.h \
-src/core/lib/support/block_annotate.h \
src/core/lib/support/env.h \
src/core/lib/support/memory.h \
src/core/lib/support/mpscq.h \
diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal
index ee593e3ea0..d4654034f2 100644
--- a/tools/doxygen/Doxyfile.core.internal
+++ b/tools/doxygen/Doxyfile.core.internal
@@ -1056,6 +1056,8 @@ src/core/ext/transport/inproc/inproc_plugin.cc \
src/core/ext/transport/inproc/inproc_transport.cc \
src/core/ext/transport/inproc/inproc_transport.h \
src/core/lib/README.md \
+src/core/lib/backoff/backoff.cc \
+src/core/lib/backoff/backoff.h \
src/core/lib/channel/README.md \
src/core/lib/channel/channel_args.cc \
src/core/lib/channel/channel_args.h \
@@ -1096,6 +1098,7 @@ src/core/lib/http/httpcli_security_connector.cc \
src/core/lib/http/parser.cc \
src/core/lib/http/parser.h \
src/core/lib/iomgr/README.md \
+src/core/lib/iomgr/block_annotate.h \
src/core/lib/iomgr/call_combiner.cc \
src/core/lib/iomgr/call_combiner.h \
src/core/lib/iomgr/closure.cc \
@@ -1301,9 +1304,6 @@ src/core/lib/support/atomic.h \
src/core/lib/support/atomic_with_atm.h \
src/core/lib/support/atomic_with_std.h \
src/core/lib/support/avl.cc \
-src/core/lib/support/backoff.cc \
-src/core/lib/support/backoff.h \
-src/core/lib/support/block_annotate.h \
src/core/lib/support/cmdline.cc \
src/core/lib/support/cpu_iphone.cc \
src/core/lib/support/cpu_linux.cc \
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index 95556b2e6f..babdfeb685 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -104,6 +104,23 @@
"gpr",
"gpr_test_util",
"grpc",
+ "grpc_test_util"
+ ],
+ "headers": [],
+ "is_filegroup": false,
+ "language": "c",
+ "name": "backoff_test",
+ "src": [
+ "test/core/backoff/backoff_test.c"
+ ],
+ "third_party": false,
+ "type": "target"
+ },
+ {
+ "deps": [
+ "gpr",
+ "gpr_test_util",
+ "grpc",
"grpc_test_util",
"test_tcp_server"
],
@@ -734,21 +751,6 @@
"headers": [],
"is_filegroup": false,
"language": "c",
- "name": "gpr_backoff_test",
- "src": [
- "test/core/support/backoff_test.c"
- ],
- "third_party": false,
- "type": "target"
- },
- {
- "deps": [
- "gpr",
- "gpr_test_util"
- ],
- "headers": [],
- "is_filegroup": false,
- "language": "c",
"name": "gpr_cmdline_test",
"src": [
"test/core/support/cmdline_test.c"
@@ -7773,7 +7775,6 @@
"src/core/lib/support/arena.cc",
"src/core/lib/support/atm.cc",
"src/core/lib/support/avl.cc",
- "src/core/lib/support/backoff.cc",
"src/core/lib/support/cmdline.cc",
"src/core/lib/support/cpu_iphone.cc",
"src/core/lib/support/cpu_linux.cc",
@@ -7854,8 +7855,6 @@
"src/core/lib/support/atomic.h",
"src/core/lib/support/atomic_with_atm.h",
"src/core/lib/support/atomic_with_std.h",
- "src/core/lib/support/backoff.h",
- "src/core/lib/support/block_annotate.h",
"src/core/lib/support/env.h",
"src/core/lib/support/memory.h",
"src/core/lib/support/mpscq.h",
@@ -7903,8 +7902,6 @@
"src/core/lib/support/atomic.h",
"src/core/lib/support/atomic_with_atm.h",
"src/core/lib/support/atomic_with_std.h",
- "src/core/lib/support/backoff.h",
- "src/core/lib/support/block_annotate.h",
"src/core/lib/support/env.h",
"src/core/lib/support/memory.h",
"src/core/lib/support/mpscq.h",
@@ -7997,6 +7994,7 @@
"language": "c",
"name": "grpc_base",
"src": [
+ "src/core/lib/backoff/backoff.cc",
"src/core/lib/channel/channel_args.cc",
"src/core/lib/channel/channel_stack.cc",
"src/core/lib/channel/channel_stack_builder.cc",
@@ -8150,6 +8148,7 @@
"include/grpc/slice_buffer.h",
"include/grpc/status.h",
"include/grpc/support/workaround_list.h",
+ "src/core/lib/backoff/backoff.h",
"src/core/lib/channel/channel_args.h",
"src/core/lib/channel/channel_stack.h",
"src/core/lib/channel/channel_stack_builder.h",
@@ -8168,6 +8167,7 @@
"src/core/lib/http/format_request.h",
"src/core/lib/http/httpcli.h",
"src/core/lib/http/parser.h",
+ "src/core/lib/iomgr/block_annotate.h",
"src/core/lib/iomgr/call_combiner.h",
"src/core/lib/iomgr/closure.h",
"src/core/lib/iomgr/combiner.h",
@@ -8283,6 +8283,7 @@
"include/grpc/slice_buffer.h",
"include/grpc/status.h",
"include/grpc/support/workaround_list.h",
+ "src/core/lib/backoff/backoff.h",
"src/core/lib/channel/channel_args.h",
"src/core/lib/channel/channel_stack.h",
"src/core/lib/channel/channel_stack_builder.h",
@@ -8301,6 +8302,7 @@
"src/core/lib/http/format_request.h",
"src/core/lib/http/httpcli.h",
"src/core/lib/http/parser.h",
+ "src/core/lib/iomgr/block_annotate.h",
"src/core/lib/iomgr/call_combiner.h",
"src/core/lib/iomgr/closure.h",
"src/core/lib/iomgr/combiner.h",
diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json
index 1fefb52f07..83418423a2 100644
--- a/tools/run_tests/generated/tests.json
+++ b/tools/run_tests/generated/tests.json
@@ -121,6 +121,28 @@
],
"cpu_cost": 1.0,
"exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "gtest": false,
+ "language": "c",
+ "name": "backoff_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix",
+ "windows"
+ ]
+ },
+ {
+ "args": [],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix",
+ "windows"
+ ],
+ "cpu_cost": 1.0,
+ "exclude_configs": [],
"exclude_iomgrs": [
"uv"
],
@@ -805,28 +827,6 @@
"flaky": false,
"gtest": false,
"language": "c",
- "name": "gpr_backoff_test",
- "platforms": [
- "linux",
- "mac",
- "posix",
- "windows"
- ]
- },
- {
- "args": [],
- "ci_platforms": [
- "linux",
- "mac",
- "posix",
- "windows"
- ],
- "cpu_cost": 1.0,
- "exclude_configs": [],
- "exclude_iomgrs": [],
- "flaky": false,
- "gtest": false,
- "language": "c",
"name": "gpr_cmdline_test",
"platforms": [
"linux",
diff --git a/tools/run_tests/performance/massage_qps_stats.py b/tools/run_tests/performance/massage_qps_stats.py
index 9b9355308a..ebbfe6c26c 100644
--- a/tools/run_tests/performance/massage_qps_stats.py
+++ b/tools/run_tests/performance/massage_qps_stats.py
@@ -74,6 +74,7 @@ def massage_qps_stats(scenario_result):
stats["core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_transport_flow_control_unstalled")
stats["core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_ping_response")
stats["core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_force_rst_stream")
+ stats["core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(core_stats, "http2_spurious_writes_begun")
stats["core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_indexed")
stats["core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_incidx")
stats["core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_incidx_v")
diff --git a/tools/run_tests/performance/scenario_result_schema.json b/tools/run_tests/performance/scenario_result_schema.json
index 2f0fd916d4..169221d18c 100644
--- a/tools/run_tests/performance/scenario_result_schema.json
+++ b/tools/run_tests/performance/scenario_result_schema.json
@@ -382,6 +382,11 @@
},
{
"mode": "NULLABLE",
+ "name": "core_http2_spurious_writes_begun",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
"name": "core_hpack_recv_indexed",
"type": "INTEGER"
},
@@ -1174,6 +1179,11 @@
},
{
"mode": "NULLABLE",
+ "name": "core_http2_spurious_writes_begun",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
"name": "core_hpack_recv_indexed",
"type": "INTEGER"
},
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 1c41679827..7c65067857 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -1551,8 +1551,11 @@ class BuildAndRunError(object):
def _has_epollexclusive():
+ binary = 'bins/%s/check_epollexclusive' % args.config
+ if not os.path.exists(binary):
+ return False
try:
- subprocess.check_call('bins/%s/check_epollexclusive' % args.config)
+ subprocess.check_call(binary)
return True
except subprocess.CalledProcessError, e:
return False